mirror of https://github.com/ceph/s3-tests.git
nuke_prefixed_buckets waits up to 60 seconds for object locks to expire
objects locked in GOVERNANCE mode can be removed with BypassGovernanceRetention, but some tests may leave an object locked in COMPLIANCE mode, which blocks deletion until the retention period expires.

nuke_prefixed_buckets now checks the retention policy of objects that it fails to delete with AccessDenied, and will wait up to 60 seconds for locks to expire before retrying the deletes. if the wait would exceed 60 seconds, it instead raises an error without deleting the bucket.

instead of doing this in nuke_prefixed_buckets, we could potentially have each object-lock test case handle this manually, but that would add a separate delay to each test case.

Signed-off-by: Casey Bodley <cbodley@redhat.com>
parent bb995c2aeb
commit 9c4f15a47e
1 changed file with 34 additions and 1 deletion
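For context, a quick sketch of how the two lock modes behave when a test creates them with boto3. The bucket and key names are illustrative only, the bucket is assumed to already have Object Lock (and therefore versioning) enabled, and bypassing a GOVERNANCE lock additionally requires the s3:BypassGovernanceRetention permission:

import datetime

import boto3

client = boto3.client('s3')
bucket = 'example-lock-bucket'  # hypothetical; must have Object Lock enabled
until = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(seconds=30)

# GOVERNANCE: cleanup can override the lock and delete the version anyway
ver = client.put_object(Bucket=bucket, Key='governance-key', Body=b'x')['VersionId']
client.put_object_retention(Bucket=bucket, Key='governance-key', VersionId=ver,
        Retention={'Mode': 'GOVERNANCE', 'RetainUntilDate': until})
client.delete_object(Bucket=bucket, Key='governance-key', VersionId=ver,
        BypassGovernanceRetention=True)  # allowed

# COMPLIANCE: no bypass exists; deleting this version fails with AccessDenied
# until RetainUntilDate passes, which is the case the cleanup now waits out
ver = client.put_object(Bucket=bucket, Key='compliance-key', Body=b'x')['VersionId']
client.put_object_retention(Bucket=bucket, Key='compliance-key', VersionId=ver,
        Retention={'Mode': 'COMPLIANCE', 'RetainUntilDate': until})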
@@ -4,6 +4,8 @@ from botocore.client import Config
 from botocore.exceptions import ClientError
 from botocore.handlers import disable_signing
 import configparser
+import datetime
+import time
 import os
 import munch
 import random
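The two new imports feed the wait logic in the next hunk: time for the sleep, and datetime because RetainUntilDate comes back as a timezone-aware datetime, so "now" has to be taken in the same timezone or the comparison raises TypeError. A small illustration with a made-up date:

import datetime

retain_until = datetime.datetime(2030, 1, 1, tzinfo=datetime.timezone.utc)

# datetime.datetime.now() < retain_until would raise TypeError (naive vs aware),
# so "now" is taken with the lock's own tzinfo, as the patch does below
now = datetime.datetime.now(retain_until.tzinfo)
remaining = (retain_until - now).total_seconds()
print('lock expires in', remaining, 'seconds')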
@@ -101,10 +103,41 @@ def nuke_bucket(client, bucket):
 
     # list and delete objects in batches
     for objects in list_versions(client, bucket, batch_size):
-        client.delete_objects(Bucket=bucket,
+        delete = client.delete_objects(Bucket=bucket,
                 Delete={'Objects': objects, 'Quiet': True},
                 BypassGovernanceRetention=True)
 
+        # check for object locks on 403 AccessDenied errors
+        for err in delete.get('Errors', []):
+            if err.get('Code') != 'AccessDenied':
+                continue
+            try:
+                res = client.get_object_retention(Bucket=bucket,
+                        Key=err['Key'], VersionId=err['VersionId'])
+                retain_date = res['Retention']['RetainUntilDate']
+                if not max_retain_date or max_retain_date < retain_date:
+                    max_retain_date = retain_date
+            except ClientError:
+                pass
+
+    if max_retain_date:
+        # wait out the retention period (up to 60 seconds)
+        now = datetime.datetime.now(max_retain_date.tzinfo)
+        if max_retain_date > now:
+            delta = max_retain_date - now
+            if delta.total_seconds() > 60:
+                raise RuntimeError('bucket {} still has objects \
+locked for {} more seconds, not waiting for \
+bucket cleanup'.format(bucket, delta.total_seconds()))
+            print('nuke_bucket', bucket, 'waiting', delta.total_seconds(),
+                'seconds for object locks to expire')
+            time.sleep(delta.total_seconds())
+
+        for objects in list_versions(client, bucket, batch_size):
+            client.delete_objects(Bucket=bucket,
+                    Delete={'Objects': objects, 'Quiet': True},
+                    BypassGovernanceRetention=True)
+
     client.delete_bucket(Bucket=bucket)
 
 def nuke_prefixed_buckets(prefix, client=None):
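Factored out of the patch for readability, the check-and-wait step looks roughly like the sketch below. The helper name wait_for_object_locks is hypothetical (the commit inlines this logic in nuke_bucket), errors is the Errors list returned by delete_objects, max_retain_date starts as None as it does earlier in nuke_bucket, and max_wait mirrors the 60-second cap from the commit message:

import datetime
import time

from botocore.exceptions import ClientError


def wait_for_object_locks(client, bucket, errors, max_wait=60):
    # hypothetical helper; the commit inlines this logic in nuke_bucket()
    # find the latest RetainUntilDate among the versions we could not delete
    max_retain_date = None
    for err in errors:
        if err.get('Code') != 'AccessDenied':
            continue  # only AccessDenied can indicate an object lock
        try:
            res = client.get_object_retention(Bucket=bucket,
                    Key=err['Key'], VersionId=err['VersionId'])
            retain_date = res['Retention']['RetainUntilDate']
            if not max_retain_date or max_retain_date < retain_date:
                max_retain_date = retain_date
        except ClientError:
            pass  # no retention configured on this version

    if not max_retain_date:
        return

    # RetainUntilDate is timezone-aware, so take "now" in the same timezone
    now = datetime.datetime.now(max_retain_date.tzinfo)
    if max_retain_date <= now:
        return
    delta = max_retain_date - now
    if delta.total_seconds() > max_wait:
        raise RuntimeError('bucket {} still has objects locked for {} more '
                           'seconds, not waiting for bucket cleanup'
                           .format(bucket, delta.total_seconds()))
    time.sleep(delta.total_seconds())

Tracking only the latest RetainUntilDate collapses the wait into a single sleep per bucket, which is the trade-off the commit message argues for: one delay during cleanup instead of a separate delay in every object-lock test case.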