Enable lifecycle tests

Add an option to configure the lc debug interval and adjust the
lifecycle tests' sleep times according to the value set.

Signed-off-by: Soumya Koduri <skoduri@redhat.com>
Author: Soumya Koduri, 2021-12-17 16:55:49 +05:30 (committed by Casey Bodley)
commit 23cd521b5f, parent e5617b783f
3 changed files with 109 additions and 47 deletions
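The pattern the commit introduces, in isolation: read the interval once from the test configuration and derive every sleep from it instead of hard-coding seconds. A minimal, self-contained sketch of that idea (illustrative only; the section and option names mirror the sample config below, the fallback of 10 matches the default added to setup(), and the gateway under test is assumed to run with a matching rgw_lc_debug_interval so that one lifecycle "day" elapses per interval):

import configparser
import os
import time

def get_lc_debug_interval(conf_path=os.environ.get('S3TEST_CONF', 's3tests.conf')):
    # Fall back to 10 seconds when the option (or the whole section) is absent,
    # mirroring the try/except added to setup() in this commit.
    cfg = configparser.RawConfigParser()
    cfg.read(conf_path)
    try:
        return int(cfg.get('s3 main', 'lc_debug_interval'))
    except (configparser.NoSectionError, configparser.NoOptionError):
        return 10

lc_interval = get_lc_debug_interval()
time.sleep(3*lc_interval)   # wait roughly three lifecycle processing windows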


@@ -41,6 +41,12 @@ secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
 ## replace with key id obtained when secret is created, or delete if KMS not tested
 #kms_keyid = 01234567-89ab-cdef-0123-456789abcdef
+## Storage classes
+#storage_classes = "LUKEWARM, FROZEN"
+## Lifecycle debug interval (default: 10)
+#lc_debug_interval = 20
 [s3 alt]
 # alt display_name set in vstart.sh
 display_name = john.doe


@@ -167,12 +167,15 @@ def nuke_prefixed_buckets(prefix, client=None):
 def configured_storage_classes():
     sc = ['STANDARD']
-    extra_sc = re.split('\W+', config.storage_classes)
+    extra_sc = re.split(r"[\b\W\b]+", config.storage_classes)
     for item in extra_sc:
         if item != 'STANDARD':
             sc.append(item)
+    sc = [i for i in sc if i]
+    print("storage classes configured: " + str(sc))
     return sc

 def setup():
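For context on the configured_storage_classes() change above: re.split() returns empty strings when storage_classes is unset or when the value is wrapped in quotes, and the new filter drops them so only real class names survive. A quick illustration, runnable on its own (the sample values are hypothetical):

import re

for raw in ['"LUKEWARM, FROZEN"', '']:
    sc = ['STANDARD']
    for item in re.split(r"[\b\W\b]+", raw):
        if item != 'STANDARD':
            sc.append(item)
    sc = [i for i in sc if i]   # drops the '' entries produced by the split
    print(sc)
# prints ['STANDARD', 'LUKEWARM', 'FROZEN'] for the quoted value
# and just ['STANDARD'] for the empty one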
@@ -244,6 +247,11 @@ def setup():
         config.storage_classes = ""
         pass
+    try:
+        config.lc_debug_interval = int(cfg.get('s3 main',"lc_debug_interval"))
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.lc_debug_interval = 10
     config.alt_access_key = cfg.get('s3 alt',"access_key")
     config.alt_secret_key = cfg.get('s3 alt',"secret_key")
     config.alt_display_name = cfg.get('s3 alt',"display_name")
@@ -685,3 +693,6 @@ def get_cloud_target_path():
 def get_cloud_target_storage_class():
     return config.cloud_target_storage_class
+
+def get_lc_debug_interval():
+    return config.lc_debug_interval


@@ -82,6 +82,7 @@ from . import (
     get_cloud_client,
     nuke_prefixed_buckets,
     configured_storage_classes,
+    get_lc_debug_interval,
     )
@@ -9164,21 +9165,23 @@ def test_lifecycle_expiration():
                                         'keep2/bar', 'expire3/foo', 'expire3/bar'])
     client = get_client()
     rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'expire1/', 'Status':'Enabled'},
-           {'ID': 'rule2', 'Expiration': {'Days': 4}, 'Prefix': 'expire3/', 'Status':'Enabled'}]
+           {'ID': 'rule2', 'Expiration': {'Days': 5}, 'Prefix': 'expire3/', 'Status':'Enabled'}]
     lifecycle = {'Rules': rules}
     client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
     response = client.list_objects(Bucket=bucket_name)
     init_objects = response['Contents']
-    time.sleep(28)
+    lc_interval = get_lc_debug_interval()
+    time.sleep(3*lc_interval)
     response = client.list_objects(Bucket=bucket_name)
     expire1_objects = response['Contents']
-    time.sleep(10)
+    time.sleep(lc_interval)
     response = client.list_objects(Bucket=bucket_name)
     keep2_objects = response['Contents']
-    time.sleep(20)
+    time.sleep(3*lc_interval)
     response = client.list_objects(Bucket=bucket_name)
     expire3_objects = response['Contents']
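The multipliers chosen for test_lifecycle_expiration line up with the rule days once one lifecycle "day" equals one debug interval. A rough timeline, assuming the default interval of 10 seconds (an assumption; the diff does not fix the value):

lc_interval = 10                 # assumed default
t1 = 3*lc_interval               # 30s: rule1 (Days=1) has run, expire1/* removed
t2 = t1 + lc_interval            # 40s: keep2/* untouched, no rule matches that prefix
t3 = t2 + 3*lc_interval          # 70s: rule2 (Days=5) has run, expire3/* removed
print(t1, t2, t3)                # 30 40 70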
@@ -9199,21 +9202,23 @@ def test_lifecyclev2_expiration():
                                         'keep2/bar', 'expire3/foo', 'expire3/bar'])
     client = get_client()
     rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'expire1/', 'Status':'Enabled'},
-           {'ID': 'rule2', 'Expiration': {'Days': 4}, 'Prefix': 'expire3/', 'Status':'Enabled'}]
+           {'ID': 'rule2', 'Expiration': {'Days': 5}, 'Prefix': 'expire3/', 'Status':'Enabled'}]
     lifecycle = {'Rules': rules}
     client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
     response = client.list_objects_v2(Bucket=bucket_name)
     init_objects = response['Contents']
-    time.sleep(28)
+    lc_interval = get_lc_debug_interval()
+    time.sleep(3*lc_interval)
     response = client.list_objects_v2(Bucket=bucket_name)
     expire1_objects = response['Contents']
-    time.sleep(10)
+    time.sleep(lc_interval)
     response = client.list_objects_v2(Bucket=bucket_name)
     keep2_objects = response['Contents']
-    time.sleep(20)
+    time.sleep(3*lc_interval)
     response = client.list_objects_v2(Bucket=bucket_name)
     expire3_objects = response['Contents']
@@ -9238,7 +9243,10 @@ def test_lifecycle_expiration_versioning_enabled():
     rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'}]
     lifecycle = {'Rules': rules}
     client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    time.sleep(30)
+    lc_interval = get_lc_debug_interval()
+    time.sleep(3*lc_interval)
     response = client.list_object_versions(Bucket=bucket_name)
     versions = response['Versions']
@@ -9289,7 +9297,9 @@ def test_lifecycle_expiration_tags1():
         Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
     eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
-    time.sleep(28)
+    lc_interval = get_lc_debug_interval()
+    time.sleep(3*lc_interval)
     try:
         expire_objects = response['Contents']
@@ -9367,7 +9377,9 @@ def test_lifecycle_expiration_tags2():
     response = setup_lifecycle_tags2(client, bucket_name)
-    time.sleep(28)
+    lc_interval = get_lc_debug_interval()
+    time.sleep(3*lc_interval)
     response = client.list_objects(Bucket=bucket_name)
     expire1_objects = response['Contents']
@@ -9388,7 +9400,9 @@ def test_lifecycle_expiration_versioned_tags2():
     response = setup_lifecycle_tags2(client, bucket_name)
-    time.sleep(28)
+    lc_interval = get_lc_debug_interval()
+    time.sleep(3*lc_interval)
     response = client.list_objects(Bucket=bucket_name)
     expire1_objects = response['Contents']
@@ -9459,14 +9473,16 @@ def test_lifecycle_expiration_noncur_tags1():
     # noncurrent version expiration at 4 "days"
     response = setup_lifecycle_noncur_tags(client, bucket_name, 4)
+    lc_interval = get_lc_debug_interval()
     num_objs = verify_lifecycle_expiration_noncur_tags(
-        client, bucket_name, 20)
+        client, bucket_name, 2*lc_interval)
     # at T+20, 10 objects should exist
     eq(num_objs, 10)
     num_objs = verify_lifecycle_expiration_noncur_tags(
-        client, bucket_name, 40)
+        client, bucket_name, 5*lc_interval)
     # at T+60, only the current object version should exist
     eq(num_objs, 1)
@@ -9580,7 +9596,10 @@ def test_lifecycle_expiration_date():
     response = client.list_objects(Bucket=bucket_name)
     init_objects = response['Contents']
-    time.sleep(20)
+    lc_interval = get_lc_debug_interval()
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(3*lc_interval)
     response = client.list_objects(Bucket=bucket_name)
     expire_objects = response['Contents']
@@ -9814,7 +9833,11 @@ def test_lifecycle_noncur_expiration():
     rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 2}, 'Prefix': 'test1/', 'Status':'Enabled'}]
     lifecycle = {'Rules': rules}
     client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    time.sleep(50)
+    lc_interval = get_lc_debug_interval()
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(5*lc_interval)
     response = client.list_object_versions(Bucket=bucket_name)
     expire_versions = response['Versions']
@@ -9880,7 +9903,11 @@ def test_lifecycle_deletemarker_expiration():
     rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 1}, 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Prefix': 'test1/', 'Status':'Enabled'}]
     lifecycle = {'Rules': rules}
     client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    time.sleep(50)
+    lc_interval = get_lc_debug_interval()
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(5*lc_interval)
     response = client.list_object_versions(Bucket=bucket_name)
     init_versions = response['Versions']
@@ -9933,7 +9960,11 @@ def test_lifecycle_multipart_expiration():
     ]
     lifecycle = {'Rules': rules}
     response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    time.sleep(50)
+    lc_interval = get_lc_debug_interval()
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(5*lc_interval)
     response = client.list_multipart_uploads(Bucket=bucket_name)
     expired_uploads = response['Uploads']
@@ -9996,7 +10027,7 @@ def test_lifecycle_transition():
                                         'keep2/bar', 'expire3/foo', 'expire3/bar'])
     client = get_client()
     rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': sc[1]}], 'Prefix': 'expire1/', 'Status': 'Enabled'},
-           {'ID': 'rule2', 'Transitions': [{'Days': 4, 'StorageClass': sc[2]}], 'Prefix': 'expire3/', 'Status': 'Enabled'}]
+           {'ID': 'rule2', 'Transitions': [{'Days': 6, 'StorageClass': sc[2]}], 'Prefix': 'expire3/', 'Status': 'Enabled'}]
     lifecycle = {'Rules': rules}
     client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
@@ -10005,22 +10036,24 @@ def test_lifecycle_transition():
     init_keys = _get_keys(response)
     eq(len(init_keys), 6)
+    lc_interval = get_lc_debug_interval()
     # Wait for first expiration (plus fudge to handle the timer window)
-    time.sleep(25)
+    time.sleep(4*lc_interval)
     expire1_keys = list_bucket_storage_class(client, bucket_name)
     eq(len(expire1_keys['STANDARD']), 4)
     eq(len(expire1_keys[sc[1]]), 2)
     eq(len(expire1_keys[sc[2]]), 0)
     # Wait for next expiration cycle
-    time.sleep(10)
+    time.sleep(lc_interval)
     keep2_keys = list_bucket_storage_class(client, bucket_name)
     eq(len(keep2_keys['STANDARD']), 4)
     eq(len(keep2_keys[sc[1]]), 2)
     eq(len(keep2_keys[sc[2]]), 0)
     # Wait for final expiration cycle
-    time.sleep(20)
+    time.sleep(5*lc_interval)
     expire3_keys = list_bucket_storage_class(client, bucket_name)
     eq(len(expire3_keys['STANDARD']), 2)
     eq(len(expire3_keys[sc[1]]), 2)
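The same arithmetic applies to test_lifecycle_transition above, whose rules use Days=1 and Days=6 (again assuming the 10-second default):

lc_interval = 10
t1 = 4*lc_interval               # 40s: Days=1 transition to sc[1] has happened
t2 = t1 + lc_interval            # 50s: keep2/* still in STANDARD
t3 = t2 + 5*lc_interval          # 100s: Days=6 transition to sc[2] has happened
print(t1, t2, t3)                # 40 50 100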
@@ -10041,7 +10074,7 @@ def test_lifecycle_transition_single_rule_multi_trans():
     bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
                                         'keep2/bar', 'expire3/foo', 'expire3/bar'])
     client = get_client()
-    rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': sc[1]}, {'Days': 4, 'StorageClass': sc[2]}], 'Prefix': 'expire1/', 'Status': 'Enabled'}]
+    rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': sc[1]}, {'Days': 6, 'StorageClass': sc[2]}], 'Prefix': 'expire1/', 'Status': 'Enabled'}]
     lifecycle = {'Rules': rules}
     client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
@@ -10050,22 +10083,24 @@ def test_lifecycle_transition_single_rule_multi_trans():
     init_keys = _get_keys(response)
     eq(len(init_keys), 6)
+    lc_interval = get_lc_debug_interval()
     # Wait for first expiration (plus fudge to handle the timer window)
-    time.sleep(25)
+    time.sleep(4*lc_interval)
     expire1_keys = list_bucket_storage_class(client, bucket_name)
     eq(len(expire1_keys['STANDARD']), 4)
     eq(len(expire1_keys[sc[1]]), 2)
     eq(len(expire1_keys[sc[2]]), 0)
     # Wait for next expiration cycle
-    time.sleep(10)
+    time.sleep(lc_interval)
     keep2_keys = list_bucket_storage_class(client, bucket_name)
     eq(len(keep2_keys['STANDARD']), 4)
     eq(len(keep2_keys[sc[1]]), 2)
     eq(len(keep2_keys[sc[2]]), 0)
     # Wait for final expiration cycle
-    time.sleep(20)
+    time.sleep(5*lc_interval)
     expire3_keys = list_bucket_storage_class(client, bucket_name)
     eq(len(expire3_keys['STANDARD']), 4)
     eq(len(expire3_keys[sc[1]]), 0)
@@ -10137,12 +10172,12 @@ def test_lifecycle_noncur_transition():
                     'StorageClass': sc[1]
                 },
                 {
-                    'NoncurrentDays': 3,
+                    'NoncurrentDays': 5,
                     'StorageClass': sc[2]
                 }
             ],
             'NoncurrentVersionExpiration': {
-                'NoncurrentDays': 5
+                'NoncurrentDays': 9
             }
         }
     ]
@@ -10155,19 +10190,21 @@ def test_lifecycle_noncur_transition():
     init_keys = list_bucket_storage_class(client, bucket)
     eq(len(init_keys['STANDARD']), 6)
-    time.sleep(25)
+    lc_interval = get_lc_debug_interval()
+    time.sleep(4*lc_interval)
     expire1_keys = list_bucket_storage_class(client, bucket)
     eq(len(expire1_keys['STANDARD']), 2)
     eq(len(expire1_keys[sc[1]]), 4)
     eq(len(expire1_keys[sc[2]]), 0)
-    time.sleep(20)
+    time.sleep(4*lc_interval)
     expire1_keys = list_bucket_storage_class(client, bucket)
     eq(len(expire1_keys['STANDARD']), 2)
     eq(len(expire1_keys[sc[1]]), 0)
     eq(len(expire1_keys[sc[2]]), 4)
-    time.sleep(20)
+    time.sleep(4*lc_interval)
     expire1_keys = list_bucket_storage_class(client, bucket)
     eq(len(expire1_keys['STANDARD']), 2)
     eq(len(expire1_keys[sc[1]]), 0)
@@ -10217,8 +10254,10 @@ def test_lifecycle_cloud_transition():
     init_keys = _get_keys(response)
     eq(len(init_keys), 4)
+    lc_interval = get_lc_debug_interval()
     # Wait for first expiration (plus fudge to handle the timer window)
-    time.sleep(30)
+    time.sleep(3*lc_interval)
     expire1_keys = list_bucket_storage_class(client, bucket_name)
     eq(len(expire1_keys['STANDARD']), 2)
@@ -10227,7 +10266,7 @@ def test_lifecycle_cloud_transition():
     else:
         eq(len(expire1_keys[cloud_sc]), 0)
-    time.sleep(20)
+    time.sleep(2*lc_interval)
     # Check if objects copied to target path
     if target_path == None:
         target_path = "rgwx-default-" + cloud_sc.lower() + "-cloud-bucket"
@@ -10235,7 +10274,7 @@ def test_lifecycle_cloud_transition():
     cloud_client = get_cloud_client()
-    time.sleep(100)
+    time.sleep(10*lc_interval)
     expire1_key1_str = prefix + keys[0]
     verify_object(cloud_client, target_path, expire1_key1_str, keys[0], target_sc)
@@ -10298,8 +10337,8 @@ def test_lifecycle_cloud_multiple_transition():
     bucket_name = _create_objects(keys=keys)
     client = get_client()
     rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': sc1}], 'Prefix': 'expire1/', 'Status': 'Enabled'},
-           {'ID': 'rule2', 'Transitions': [{'Days': 6, 'StorageClass': cloud_sc}], 'Prefix': 'expire1/', 'Status': 'Enabled'},
-           {'ID': 'rule3', 'Expiration': {'Days': 8}, 'Prefix': 'expire1/', 'Status': 'Enabled'}]
+           {'ID': 'rule2', 'Transitions': [{'Days': 5, 'StorageClass': cloud_sc}], 'Prefix': 'expire1/', 'Status': 'Enabled'},
+           {'ID': 'rule3', 'Expiration': {'Days': 9}, 'Prefix': 'expire1/', 'Status': 'Enabled'}]
     lifecycle = {'Rules': rules}
     client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
@@ -10308,15 +10347,17 @@ def test_lifecycle_cloud_multiple_transition():
     init_keys = _get_keys(response)
     eq(len(init_keys), 4)
+    lc_interval = get_lc_debug_interval()
     # Wait for first expiration (plus fudge to handle the timer window)
-    time.sleep(50)
+    time.sleep(3*lc_interval)
     expire1_keys = list_bucket_storage_class(client, bucket_name)
     eq(len(expire1_keys['STANDARD']), 2)
     eq(len(expire1_keys[sc[1]]), 2)
     eq(len(expire1_keys[sc[2]]), 0)
     # Wait for next expiration cycle
-    time.sleep(50)
+    time.sleep(4*lc_interval)
     expire1_keys = list_bucket_storage_class(client, bucket_name)
     eq(len(expire1_keys['STANDARD']), 2)
     eq(len(expire1_keys[sc[1]]), 0)
@@ -10327,7 +10368,7 @@ def test_lifecycle_cloud_multiple_transition():
     eq(len(expire1_keys[sc[2]]), 0)
     # Wait for final expiration cycle
-    time.sleep(60)
+    time.sleep(4*lc_interval)
     expire3_keys = list_bucket_storage_class(client, bucket_name)
     eq(len(expire3_keys['STANDARD']), 2)
     eq(len(expire3_keys[sc[1]]), 0)
@@ -10373,7 +10414,7 @@ def test_lifecycle_noncur_cloud_transition():
                     'StorageClass': sc[1]
                 },
                 {
-                    'NoncurrentDays': 3,
+                    'NoncurrentDays': 5,
                     'StorageClass': sc[2]
                 }
             ],
@@ -10392,13 +10433,15 @@ def test_lifecycle_noncur_cloud_transition():
     response = client.list_object_versions(Bucket=bucket)
-    time.sleep(25)
+    lc_interval = get_lc_debug_interval()
+    time.sleep(3*lc_interval)
     expire1_keys = list_bucket_storage_class(client, bucket)
     eq(len(expire1_keys['STANDARD']), 2)
     eq(len(expire1_keys[sc[1]]), 4)
     eq(len(expire1_keys[sc[2]]), 0)
-    time.sleep(80)
+    time.sleep(5*lc_interval)
     expire1_keys = list_bucket_storage_class(client, bucket)
     eq(len(expire1_keys['STANDARD']), 2)
     eq(len(expire1_keys[sc[1]]), 0)
@@ -10415,7 +10458,7 @@ def test_lifecycle_noncur_cloud_transition():
     cloud_client = get_cloud_client()
-    time.sleep(10)
+    time.sleep(lc_interval)
     result = list_bucket_versions(client, bucket)
     for src_key in keys:
@@ -10455,8 +10498,10 @@ def test_lifecycle_cloud_transition_large_obj():
     lifecycle = {'Rules': rules}
     response = client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
+    lc_interval = get_lc_debug_interval()
     # Wait for first expiration (plus fudge to handle the timer window)
-    time.sleep(30)
+    time.sleep(3*lc_interval)
     expire1_keys = list_bucket_storage_class(client, bucket)
     eq(len(expire1_keys['STANDARD']), 1)
@@ -10472,7 +10517,7 @@ def test_lifecycle_cloud_transition_large_obj():
     prefix = bucket + "/"
     # multipart upload takes time
-    time.sleep(10)
+    time.sleep(3*lc_interval)
     cloud_client = get_cloud_client()
     expire1_key1_str = prefix + keys[1]