From 9c4f15a47e1ee899a7dc453a4971a164e10521d8 Mon Sep 17 00:00:00 2001
From: Casey Bodley <cbodley@redhat.com>
Date: Wed, 4 Aug 2021 15:00:04 -0400
Subject: [PATCH] nuke_prefixed_buckets waits up to 60 seconds for object
 locks to expire

objects locked in GOVERNANCE mode can be removed with
BypassGovernanceRetention, but some tests may leave an object locked in
COMPLIANCE mode, which blocks deletion until the retention period
expires

nuke_prefixed_buckets now checks the retention policy of objects that it
fails to delete with AccessDenied, and will wait up to 60 seconds for
locks to expire before retrying the deletes. if the wait exceeds 60
seconds, it instead throws an error without deleting the bucket

instead of doing this in nuke_prefixed_buckets, we could potentially
have each object-lock test case handle this manually, but that would add
a separate delay to each test case

Signed-off-by: Casey Bodley <cbodley@redhat.com>
---
 s3tests_boto3/functional/__init__.py | 37 ++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+), 1 deletion(-)

diff --git a/s3tests_boto3/functional/__init__.py b/s3tests_boto3/functional/__init__.py
index ee91a85..40c6b25 100644
--- a/s3tests_boto3/functional/__init__.py
+++ b/s3tests_boto3/functional/__init__.py
@@ -4,6 +4,8 @@ from botocore.client import Config
 from botocore.exceptions import ClientError
 from botocore.handlers import disable_signing
 import configparser
+import datetime
+import time
 import os
 import munch
 import random
@@ -101,10 +103,43 @@ def nuke_bucket(client, bucket):
+    # NOTE(review): must be initialized before the scan below; the patch as
+    # originally posted read max_retain_date without assigning it first,
+    # which raises NameError on the first AccessDenied delete error.
+    max_retain_date = None
 
     # list and delete objects in batches
     for objects in list_versions(client, bucket, batch_size):
-        client.delete_objects(Bucket=bucket,
+        delete = client.delete_objects(Bucket=bucket,
                 Delete={'Objects': objects, 'Quiet': True},
                 BypassGovernanceRetention=True)
 
+        # check for object locks on 403 AccessDenied errors
+        for err in delete.get('Errors', []):
+            if err.get('Code') != 'AccessDenied':
+                continue
+            try:
+                res = client.get_object_retention(Bucket=bucket,
+                        Key=err['Key'],
+                        VersionId=err['VersionId'])
+                retain_date = res['Retention']['RetainUntilDate']
+                if not max_retain_date or max_retain_date < retain_date:
+                    max_retain_date = retain_date
+            except ClientError:
+                pass
+
+    if max_retain_date:
+        # wait out the retention period (up to 60 seconds)
+        now = datetime.datetime.now(max_retain_date.tzinfo)
+        if max_retain_date > now:
+            delta = max_retain_date - now
+            if delta.total_seconds() > 60:
+                raise RuntimeError('bucket {} still has objects \
+locked for {} more seconds, not waiting for \
+bucket cleanup'.format(bucket, delta.total_seconds()))
+            print('nuke_bucket', bucket, 'waiting', delta.total_seconds(),
+                'seconds for object locks to expire')
+            time.sleep(delta.total_seconds())
+
+        for objects in list_versions(client, bucket, batch_size):
+            client.delete_objects(Bucket=bucket,
+                Delete={'Objects': objects, 'Quiet': True},
+                BypassGovernanceRetention=True)
+
     client.delete_bucket(Bucket=bucket)
 
 def nuke_prefixed_buckets(prefix, client=None):