Mirror of https://github.com/ceph/s3-tests.git
rgw/logging: add cleanup tests
* use flushing api instead of sleep when possible
* test flushing concurrency

Signed-off-by: Yuval Lifshitz <ylifshit@ibm.com>
parent 87df05aaa4
commit 75693ccff7

1 changed file with 501 additions and 78 deletions
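The diff below replaces sleep-based waits with the ceph flush API wherever the client supports it. As a rough illustration only (not part of the diff), here is a minimal sketch of that flush-or-sleep pattern, assuming a boto3 S3 client that already has the ceph bucket-logging extension loaded (so post_bucket_logging exists, as in these tests); the function name, the roll-time argument, and the concurrent flag are illustrative assumptions, not code from the commit:

import threading
import time


def flush_bucket_logs(client, bucket_names, roll_time_seconds, has_flush_api, concurrent=False):
    # Flush pending bucket-logging records for every source bucket.
    if not has_flush_api:
        # No flush extension: wait out the configured roll time, then touch
        # each bucket so its pending log object gets committed.
        time.sleep(roll_time_seconds * 1.1)
        for name in bucket_names:
            client.put_object(Bucket=name, Key='dummy', Body='dummy')
        return

    if not concurrent:
        for name in bucket_names:
            client.post_bucket_logging(Bucket=name)
        return

    # Concurrency variant: flush all source buckets from parallel threads and
    # wait for every flush to finish before inspecting the log bucket.
    threads = [threading.Thread(target=client.post_bucket_logging,
                                kwargs={'Bucket': name})
               for name in bucket_names]
    for thr in threads:
        thr.start()
    for thr in threads:
        thr.join()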
@@ -14319,6 +14319,14 @@ def test_put_bucket_logging():
     assert response['LoggingEnabled'] == logging_enabled
 
 
+def _flush_logs(client, src_bucket_name, dummy_key="dummy"):
+    if _has_bucket_logging_extension():
+        client.post_bucket_logging(Bucket=src_bucket_name)
+    else:
+        time.sleep(expected_object_roll_time*1.1)
+        client.put_object(Bucket=src_bucket_name, Key=dummy_key, Body='dummy')
+
+
 def _bucket_logging_key_filter(log_type):
     src_bucket_name = get_new_bucket_name()
     src_bucket = get_new_bucket_resource(name=src_bucket_name)
@@ -14374,8 +14382,7 @@ def _bucket_logging_key_filter(log_type):
 
     expected_count = len(names)
 
-    time.sleep(expected_object_roll_time)
-    client.put_object(Bucket=src_bucket_name, Key='test/dummy.txt', Body='dummy')
+    _flush_logs(client, src_bucket_name, dummy_key="test/dummy.txt")
 
     response = client.list_objects_v2(Bucket=log_bucket_name)
     keys = _get_keys(response)
@@ -14404,74 +14411,133 @@ def test_bucket_logging_key_filter_j():
     _bucket_logging_key_filter('Journal')
 
 
-def _bucket_logging_flush(log_type):
-    src_bucket_name = get_new_bucket_name()
-    src_bucket = get_new_bucket_resource(name=src_bucket_name)
+def _bucket_logging_flush(logging_type, single_prefix, concurrency):
+    if not _has_bucket_logging_extension():
+        pytest.skip('ceph extension to bucket logging not supported at client')
     log_bucket_name = get_new_bucket_name()
     log_bucket = get_new_bucket_resource(name=log_bucket_name)
     client = get_client()
 
-    logging_enabled = {
-        'TargetBucket': log_bucket_name,
-        'LoggingType': log_type,
-        'TargetPrefix': 'log/',
-        'ObjectRollTime': 300, # 5 minutes
-        'TargetObjectKeyFormat': {'SimplePrefix': {}},
-        'RecordsBatchSize': 0,
-    }
-
-    response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
-        'LoggingEnabled': logging_enabled,
-    })
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    response = client.get_bucket_logging(Bucket=src_bucket_name)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    if log_type == 'Journal':
-        assert response['LoggingEnabled'] == logging_enabled
-    elif log_type == 'Standard':
-        print('TODO')
-    else:
-        assert False, 'unknown log type: %s' % log_type
-
-    num_keys = 5
-    for j in range(num_keys):
-        name = 'myobject'+str(j)
-        client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
-
-    response = client.list_objects_v2(Bucket=src_bucket_name)
-    src_keys = _get_keys(response)
-
-    response = client.post_bucket_logging(Bucket=src_bucket_name)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    num_buckets = 5
+    buckets = []
+    longer_time = expected_object_roll_time*10
+    for j in range(num_buckets):
+        src_bucket_name = get_new_bucket_name()
+        src_bucket = get_new_bucket_resource(name=src_bucket_name)
+        logging_enabled = {'TargetBucket': log_bucket_name,
+                           'ObjectRollTime': longer_time, 'LoggingType': logging_type}
+        if single_prefix:
+            logging_enabled['TargetPrefix'] = 'log/'
+        else:
+            logging_enabled['TargetPrefix'] = src_bucket_name+'/'
 
-    expected_count = num_keys
+        response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+            'LoggingEnabled': logging_enabled,
+        })
+        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+        buckets.append(src_bucket_name)
+
+    num_keys = 10
+    src_names = []
+    for j in range(num_keys):
+        src_names.append('myobject'+str(j))
+
+    for src_bucket_name in buckets:
+        for name in src_names:
+            client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
+            client.delete_object(Bucket=src_bucket_name, Key=name)
 
     response = client.list_objects_v2(Bucket=log_bucket_name)
     keys = _get_keys(response)
-    assert len(keys) == 1
+    assert len(keys) == 0
+
+    t = []
+    for src_bucket_name in buckets:
+        if concurrency:
+            thr = threading.Thread(target = client.post_bucket_logging,
+                                   kwargs={'Bucket': src_bucket_name})
+            thr.start()
+            t.append(thr)
+        else:
+            client.post_bucket_logging(Bucket=src_bucket_name)
+            if single_prefix and logging_type == 'Standard':
+                # in case of single prefix we flush only once
+                # because flushing itself will be logged
+                # and the next flush will commit the log
+                break
+
+    _do_wait_completion(t)
+
+    response = client.list_objects_v2(Bucket=log_bucket_name)
+    keys = _get_keys(response)
+
+    if single_prefix:
+        assert len(keys) == 1
+    else:
+        assert len(keys) >= num_buckets
 
     for key in keys:
-        assert key.startswith('log/')
         response = client.get_object(Bucket=log_bucket_name, Key=key)
         body = _get_body(response)
-        assert _verify_records(body, src_bucket_name, 'REST.PUT.OBJECT', src_keys, log_type, expected_count)
+        found = False
+        for src_bucket_name in buckets:
+            if single_prefix:
+                prefix = 'log/'
+            else:
+                prefix = src_bucket_name+'/'
+            if key.startswith(prefix):
+                found = True
+                assert _verify_records(body, src_bucket_name, 'REST.PUT.OBJECT', src_names, logging_type, num_keys)
+                assert _verify_records(body, src_bucket_name, 'REST.DELETE.OBJECT', src_names, logging_type, num_keys)
+        assert found
 
 
 @pytest.mark.bucket_logging
 @pytest.mark.fails_on_aws
 def test_bucket_logging_flush_j():
-    if not _has_bucket_logging_extension():
-        pytest.skip('ceph extension to bucket logging not supported at client')
-    _bucket_logging_flush('Journal')
+    _bucket_logging_flush('Journal', False, False)
 
 
 @pytest.mark.bucket_logging
 @pytest.mark.fails_on_aws
 def test_bucket_logging_flush_s():
-    if not _has_bucket_logging_extension():
-        pytest.skip('ceph extension to bucket logging not supported at client')
-    _bucket_logging_flush('Standard')
+    _bucket_logging_flush('Standard', False, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.fails_on_aws
+def test_bucket_logging_flush_j_single():
+    _bucket_logging_flush('Journal', True, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.fails_on_aws
+def test_bucket_logging_flush_s_single():
+    _bucket_logging_flush('Standard', True, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.fails_on_aws
+def test_bucket_logging_concurrent_flush_j():
+    _bucket_logging_flush('Journal', False, True)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.fails_on_aws
+def test_bucket_logging_concurrent_flush_s():
+    _bucket_logging_flush('Standard', False, True)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.fails_on_aws
+def test_bucket_logging_concurrent_flush_j_single():
+    _bucket_logging_flush('Journal', True, True)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.fails_on_aws
+def test_bucket_logging_concurrent_flush_s_single():
+    _bucket_logging_flush('Standard', True, True)
 
 
 @pytest.mark.bucket_logging
@@ -14575,8 +14641,7 @@ def _bucket_logging_tenant_objects(src_client, src_bucket_name, log_client, log_
     response = src_client.list_objects_v2(Bucket=src_bucket_name)
     src_keys = _get_keys(response)
 
-    time.sleep(expected_object_roll_time)
-    src_client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+    _flush_logs(src_client, src_bucket_name)
 
     response = log_client.list_objects_v2(Bucket=log_bucket_name)
     keys = _get_keys(response)
@@ -14808,8 +14873,7 @@ def _bucket_logging_put_objects(versioned):
     response = client.list_objects_v2(Bucket=src_bucket_name)
     src_keys = _get_keys(response)
 
-    time.sleep(expected_object_roll_time)
-    client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+    _flush_logs(client, src_bucket_name)
 
     response = client.list_objects_v2(Bucket=log_bucket_name)
     keys = _get_keys(response)
@@ -14864,12 +14928,16 @@ def test_bucket_logging_put_concurrency():
     response = client.list_objects_v2(Bucket=src_bucket_name)
     src_keys = _get_keys(response)
 
-    time.sleep(expected_object_roll_time)
-
+    if not has_extensions:
+        time.sleep(expected_object_roll_time*1.1)
     t = []
     for i in range(num_keys):
-        thr = threading.Thread(target = client.put_object,
-                               kwargs={'Bucket': src_bucket_name, 'Key': 'dummy', 'Body': 'dummy'})
+        if has_extensions:
+            thr = threading.Thread(target = client.post_bucket_logging,
+                                   kwargs={'Bucket': src_bucket_name})
+        else:
+            thr = threading.Thread(target = client.put_object,
+                                   kwargs={'Bucket': src_bucket_name, 'Key': 'dummy', 'Body': 'dummy'})
         thr.start()
         t.append(thr)
     _do_wait_completion(t)
@@ -14881,7 +14949,6 @@ def test_bucket_logging_put_concurrency():
     record_type = 'Standard' if not has_extensions else 'Journal'
 
     for key in keys:
-        logger.info('logging object: %s', key)
         assert key.startswith('log/')
         response = client.get_object(Bucket=log_bucket_name, Key=key)
         body = _get_body(response)
@@ -14922,8 +14989,7 @@ def _bucket_logging_delete_objects(versioned):
             client.delete_object(Bucket=src_bucket_name, Key=key, VersionId=response['VersionId'])
         client.delete_object(Bucket=src_bucket_name, Key=key)
 
-    time.sleep(expected_object_roll_time)
-    client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+    _flush_logs(client, src_bucket_name)
 
     response = client.list_objects_v2(Bucket=log_bucket_name)
     keys = _get_keys(response)
@@ -14987,8 +15053,7 @@ def _bucket_logging_get_objects(versioned):
             client.get_object(Bucket=src_bucket_name, Key=key, VersionId=response['VersionId'])
         client.get_object(Bucket=src_bucket_name, Key=key)
 
-    time.sleep(expected_object_roll_time)
-    client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+    _flush_logs(client, src_bucket_name)
 
     response = client.list_objects_v2(Bucket=log_bucket_name)
     keys = _get_keys(response)
@@ -15062,8 +15127,7 @@ def _bucket_logging_copy_objects(versioned, another_bucket):
         else:
            client.copy_object(Bucket=src_bucket_name, Key='copy_of_'+key, CopySource={'Bucket': src_bucket_name, 'Key': key})
 
-    time.sleep(expected_object_roll_time)
-    client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+    _flush_logs(client, src_bucket_name)
 
     response = client.list_objects_v2(Bucket=log_bucket_name)
     keys = _get_keys(response)
@@ -15130,8 +15194,7 @@ def _bucket_logging_head_objects(versioned):
         else:
            client.head_object(Bucket=src_bucket_name, Key=key)
 
-    time.sleep(expected_object_roll_time)
-    client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+    _flush_logs(client, src_bucket_name)
 
     response = client.list_objects_v2(Bucket=log_bucket_name)
     keys = _get_keys(response)
@@ -15187,8 +15250,7 @@ def _bucket_logging_mpu(versioned):
     (upload_id, data, parts) = _multipart_upload(bucket_name=src_bucket_name, key=src_key, size=objlen)
     client.complete_multipart_upload(Bucket=src_bucket_name, Key=src_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
 
-    time.sleep(expected_object_roll_time)
-    client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+    _flush_logs(client, src_bucket_name)
 
     response = client.list_objects_v2(Bucket=log_bucket_name)
     keys = _get_keys(response)
@@ -15247,8 +15309,7 @@ def _bucket_logging_mpu_copy(versioned):
 
     client.copy_object(Bucket=src_bucket_name, Key='copy_of_'+src_key, CopySource={'Bucket': src_bucket_name, 'Key': src_key})
 
-    time.sleep(expected_object_roll_time)
-    client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+    _flush_logs(client, src_bucket_name)
 
     response = client.list_objects_v2(Bucket=log_bucket_name)
     keys = _get_keys(response)
@@ -15312,8 +15373,7 @@ def _bucket_logging_multi_delete(versioned):
     objs_dict = _make_objs_dict(key_names=src_keys)
     client.delete_objects(Bucket=src_bucket_name, Delete=objs_dict)
 
-    time.sleep(expected_object_roll_time)
-    client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+    _flush_logs(client, src_bucket_name)
 
     response = client.list_objects_v2(Bucket=log_bucket_name)
     keys = _get_keys(response)
@@ -15366,9 +15426,7 @@ def _bucket_logging_type(logging_type):
     response = client.list_objects_v2(Bucket=src_bucket_name)
     src_keys = _get_keys(response)
 
-    time.sleep(expected_object_roll_time)
-    client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
-    client.head_object(Bucket=src_bucket_name, Key='dummy')
+    _flush_logs(client, src_bucket_name)
 
     response = client.list_objects_v2(Bucket=log_bucket_name)
     keys = _get_keys(response)
@@ -15503,9 +15561,8 @@ def test_bucket_logging_multiple_prefixes():
             name = 'myobject'+str(j)
             client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
 
-    time.sleep(expected_object_roll_time)
     for src_bucket_name in buckets:
-        client.head_object(Bucket=src_bucket_name, Key='myobject0')
+        _flush_logs(client, src_bucket_name)
 
     response = client.list_objects_v2(Bucket=log_bucket_name)
     keys = _get_keys(response)
@@ -15555,8 +15612,7 @@ def test_bucket_logging_single_prefix():
             name = 'myobject'+str(bucket_ind)+str(j)
             client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
 
-    time.sleep(expected_object_roll_time)
-    client.put_object(Bucket=buckets[0], Key='dummy', Body='dummy')
+    _flush_logs(client, src_bucket_name)
 
     response = client.list_objects_v2(Bucket=log_bucket_name)
     keys = _get_keys(response)
@@ -15572,6 +15628,372 @@ def test_bucket_logging_single_prefix():
         found = _verify_records(body, src_bucket_name, 'REST.PUT.OBJECT', src_keys, 'Standard', num_keys)
         assert found
 
+
+def _bucket_logging_cleanup(cleanup_type, logging_type, single_prefix, concurrency):
+    if not _has_bucket_logging_extension():
+        pytest.skip('ceph extension to bucket logging not supported at client')
+    log_bucket_name = get_new_bucket_name()
+    log_bucket = get_new_bucket_resource(name=log_bucket_name)
+    client = get_client()
+
+    num_buckets = 5
+    buckets = []
+    longer_time = expected_object_roll_time*10
+    for j in range(num_buckets):
+        src_bucket_name = get_new_bucket_name()
+        src_bucket = get_new_bucket_resource(name=src_bucket_name)
+        logging_enabled = {'TargetBucket': log_bucket_name,
+                           'ObjectRollTime': longer_time, 'LoggingType': logging_type}
+        if single_prefix:
+            logging_enabled['TargetPrefix'] = 'log/'
+        else:
+            logging_enabled['TargetPrefix'] = src_bucket_name+'/'
+
+        response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+            'LoggingEnabled': logging_enabled,
+        })
+        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+        buckets.append(src_bucket_name)
+
+    num_keys = 10
+    src_names = []
+    for j in range(num_keys):
+        src_names.append('myobject'+str(j))
+
+    for src_bucket_name in buckets:
+        for name in src_names:
+            client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
+            client.delete_object(Bucket=src_bucket_name, Key=name)
+
+    response = client.list_objects_v2(Bucket=log_bucket_name)
+    keys = _get_keys(response)
+    assert len(keys) == 0
+
+    t = []
+    updated_longer_time = expected_object_roll_time*20
+    for src_bucket_name in buckets:
+        if not single_prefix:
+            logging_enabled['TargetPrefix'] = src_bucket_name+'/'
+        if cleanup_type == 'deletion':
+            # cleanup based on bucket deletion
+            if concurrency:
+                thr = threading.Thread(target = client.delete_bucket,
+                                       kwargs={'Bucket': src_bucket_name})
+                thr.start()
+                t.append(thr)
+            else:
+                client.delete_bucket(Bucket=src_bucket_name)
+        elif cleanup_type == 'disabling':
+            # cleanup based on disabling bucket logging
+            if concurrency:
+                thr = threading.Thread(target = client.put_bucket_logging,
+                                       kwargs={'Bucket': src_bucket_name, 'BucketLoggingStatus': {}})
+                thr.start()
+                t.append(thr)
+            else:
+                client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={})
+        elif cleanup_type == 'updating':
+            # cleanup based on updating bucket logging parameters
+            logging_enabled['ObjectRollTime'] = updated_longer_time
+            if concurrency:
+                thr = threading.Thread(target = client.put_bucket_logging,
+                                       kwargs={'Bucket': src_bucket_name, 'BucketLoggingStatus': {'LoggingEnabled': logging_enabled}})
+                thr.start()
+                t.append(thr)
+            else:
+                client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+                    'LoggingEnabled': logging_enabled,
+                })
+        elif cleanup_type == 'notupdating':
+            # no concurrency testing
+            client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+                'LoggingEnabled': logging_enabled,
+            })
+        elif cleanup_type != 'target':
+            assert False, 'invalid cleanup type: ' + cleanup_type
+
+    exact_match = False
+    _do_wait_completion(t)
+
+    if cleanup_type == 'target':
+        # delete the log bucket and then create it to make sure that no pending objects remained
+        # no concurrency testing
+        client.delete_bucket(Bucket=log_bucket_name)
+        log_bucket = get_new_bucket_resource(name=log_bucket_name)
+        old_names = src_names
+        src_names = []
+        for j in range(num_keys):
+            src_names.append('after_deletion_myobject'+str(j))
+        for src_bucket_name in buckets:
+            for name in src_names:
+                client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
+                client.delete_object(Bucket=src_bucket_name, Key=name)
+        response = client.list_objects_v2(Bucket=log_bucket_name)
+        keys = _get_keys(response)
+        assert len(keys) == 0
+        # flush any new pending objects
+        for src_bucket_name in buckets:
+            client.post_bucket_logging(Bucket=src_bucket_name)
+            if single_prefix and logging_type == 'Standard':
+                # in case of single prefix we flush only once
+                # because flushing itself will be logged
+                # and the next flush will commit the log
+                break
+        # make sure that only the new objects are logged
+        exact_match = True
+
+    response = client.list_objects_v2(Bucket=log_bucket_name)
+    keys = _get_keys(response)
+
+    if cleanup_type == 'notupdating':
+        # no cleanup expected
+        assert len(keys) == 0
+        return
+
+    if single_prefix:
+        assert len(keys) == 1
+    else:
+        assert len(keys) >= num_buckets
+
+    for key in keys:
+        response = client.get_object(Bucket=log_bucket_name, Key=key)
+        body = _get_body(response)
+        found = False
+        for src_bucket_name in buckets:
+            if single_prefix:
+                prefix = 'log/'
+            else:
+                prefix = src_bucket_name+'/'
+            if key.startswith(prefix):
+                found = True
+                assert _verify_records(body, src_bucket_name, 'REST.PUT.OBJECT', src_names, logging_type, num_keys, exact_match)
+                assert _verify_records(body, src_bucket_name, 'REST.DELETE.OBJECT', src_names, logging_type, num_keys, exact_match)
+        assert found
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_bucket_deletion_j():
+    _bucket_logging_cleanup('deletion', 'Journal', False, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_bucket_deletion_j_single():
+    _bucket_logging_cleanup('deletion', 'Journal', True, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_disabling_j():
+    _bucket_logging_cleanup('disabling', 'Journal', False, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_disabling_j_single():
+    _bucket_logging_cleanup('disabling', 'Journal', True, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_updating_j():
+    _bucket_logging_cleanup('updating', 'Journal', False, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_updating_j_single():
+    _bucket_logging_cleanup('updating', 'Journal', True, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_notupdating_j():
+    _bucket_logging_cleanup('notupdating', 'Journal', False, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_notupdating_j_single():
+    _bucket_logging_cleanup('notupdating', 'Journal', True, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_bucket_deletion_s():
+    _bucket_logging_cleanup('deletion', 'Standard', False, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_bucket_deletion_s_single():
+    _bucket_logging_cleanup('deletion', 'Standard', True, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_disabling_s():
+    _bucket_logging_cleanup('disabling', 'Standard', False, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_disabling_s_single():
+    _bucket_logging_cleanup('disabling', 'Standard', True, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_updating_s():
+    _bucket_logging_cleanup('updating', 'Standard', False, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_updating_s_single():
+    _bucket_logging_cleanup('updating', 'Standard', True, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_notupdating_s():
+    _bucket_logging_cleanup('notupdating', 'Standard', False, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_notupdating_s_single():
+    _bucket_logging_cleanup('notupdating', 'Standard', True, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_target_cleanup_j():
+    _bucket_logging_cleanup('target', 'Journal', False, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_target_cleanup_j_single():
+    _bucket_logging_cleanup('target', 'Journal', True, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_target_cleanup_s():
+    _bucket_logging_cleanup('target', 'Standard', False, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_target_cleanup_s_single():
+    _bucket_logging_cleanup('target', 'Standard', True, False)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_bucket_concurrent_deletion_j():
+    _bucket_logging_cleanup('deletion', 'Journal', False, True)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_bucket_concurrent_deletion_j_single():
+    _bucket_logging_cleanup('deletion', 'Journal', True, True)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_concurrent_disabling_j():
+    _bucket_logging_cleanup('disabling', 'Journal', False, True)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_concurrent_disabling_j_single():
+    _bucket_logging_cleanup('disabling', 'Journal', True, True)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_concurrent_updating_j():
+    _bucket_logging_cleanup('updating', 'Journal', False, True)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_concurrent_updating_j_single():
+    _bucket_logging_cleanup('updating', 'Journal', True, True)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_bucket_concurrent_deletion_s():
+    _bucket_logging_cleanup('deletion', 'Standard', False, True)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_bucket_concurrent_deletion_s_single():
+    _bucket_logging_cleanup('deletion', 'Standard', True, True)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_concurrent_disabling_s():
+    _bucket_logging_cleanup('disabling', 'Standard', False, True)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_concurrent_disabling_s_single():
+    _bucket_logging_cleanup('disabling', 'Standard', True, True)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_concurrent_updating_s():
+    _bucket_logging_cleanup('updating', 'Standard', False, True)
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.bucket_logging_cleanup
+@pytest.mark.fails_on_aws
+def test_bucket_logging_cleanup_concurrent_updating_s_single():
+    _bucket_logging_cleanup('updating', 'Standard', True, True)
+
+
 def check_parts_count(parts, expected):
     # AWS docs disagree on the name of this element
     if 'TotalPartsCount' in parts:
@@ -15876,3 +16298,4 @@ def test_get_object_attributes():
     assert response['ETag'] == etag
     assert response['StorageClass'] == 'STANDARD'
     assert 'ObjectParts' not in response
+