Mirror of https://github.com/ceph/s3-tests.git (synced 2025-03-21 09:37:53 +00:00)
rgw/restore: s3tests to test restore object functionality.
These tests exercise temporary restore, permanent restore, and read-through restore of cloud-transitioned objects, including the zonegroup parameters and checks involved.

Signed-off-by: shreyanshjain7174 <ssanchet@redhat.com>
Signed-off-by: Jiffin Tony Thottan <thottanjiffin@gmail.com>
parent ae8bebd87f
commit ca71ddd86b
4 changed files with 173 additions and 1 deletion
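For orientation, a minimal boto3 sketch of the three restore flows these tests exercise; the endpoint, credentials, bucket, and key below are placeholders, not part of this commit:

import boto3

# Hypothetical local RGW endpoint and credentials; adjust to your setup.
client = boto3.client('s3', endpoint_url='http://localhost:8000',
                      aws_access_key_id='ACCESS_KEY',
                      aws_secret_access_key='SECRET_KEY')

# Temporary restore: the object is readable for 'Days', then expires again.
client.restore_object(Bucket='bkt', Key='obj', RestoreRequest={'Days': 2})

# Permanent restore: an empty RestoreRequest brings the object back for good.
client.restore_object(Bucket='bkt', Key='obj', RestoreRequest={})

# Read-through restore: with allow_read_through enabled, a plain GET on a
# cloud-transitioned object triggers the restore itself.
obj = client.get_object(Bucket='bkt', Key='obj')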
@@ -10,6 +10,7 @@ markers =
     bucket_logging
     checksum
     cloud_transition
+    cloud_restore
     encryption
     fails_on_aws
     fails_on_dbstore
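The new cloud_restore marker lets the restore tests be selected or excluded as a group, e.g. pytest -m cloud_restore or pytest -m 'not cloud_restore'.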
@@ -54,6 +54,8 @@ secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
 
 ## Lifecycle debug interval (default: 10)
 #lc_debug_interval = 20
+## Restore debug interval (default: 100)
+#rgw_restore_debug_interval = 60
 
 [s3 alt]
 # alt display_name set in vstart.sh
@@ -71,7 +73,8 @@ access_key = NOPQRSTUVWXYZABCDEFG
 secret_key = nopqrstuvwxyzabcdefghijklmnabcdefghijklm
 
 #[s3 cloud]
-## to run the testcases with "cloud_transition" attribute.
+## to run the testcases with "cloud_transition" for transition
+## and "cloud_restore" for restore attribute.
 ## Note: the waiting time may have to tweaked depending on
 ## the I/O latency to the cloud endpoint.
 
@@ -95,6 +98,8 @@ secret_key = nopqrstuvwxyzabcdefghijklmnabcdefghijklm
 
 ## Above configured cloud storage class config options
 # retain_head_object = false
+# allow_read_through = false # change it to enable read_through
+# read_through_restore_days = 2
 # target_storage_class = Target_SC
 # target_path = cloud-bucket
 
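Both debug intervals compress a lifecycle "day" into seconds so the tests finish quickly: lc_debug_interval is the number of seconds RGW's lifecycle processing treats as one day, and rgw_restore_debug_interval is assumed to play the same role for restore expiry (defaults 10 and 100, per the comments above).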
@@ -248,6 +248,11 @@ def configure():
     except (configparser.NoSectionError, configparser.NoOptionError):
         config.lc_debug_interval = 10
 
+    try:
+        config.rgw_restore_debug_interval = int(cfg.get('s3 main',"rgw_restore_debug_interval"))
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.rgw_restore_debug_interval = 100
+
     config.alt_access_key = cfg.get('s3 alt',"access_key")
     config.alt_secret_key = cfg.get('s3 alt',"secret_key")
     config.alt_display_name = cfg.get('s3 alt',"display_name")
@@ -375,6 +380,11 @@ def get_cloud_config(cfg):
     except (configparser.NoSectionError, configparser.NoOptionError):
         config.cloud_retain_head_object = None
 
+    try:
+        config.allow_read_through = cfg.get('s3 cloud',"allow_read_through")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.allow_read_through = False
+
     try:
         config.cloud_target_path = cfg.get('s3 cloud',"target_path")
     except (configparser.NoSectionError, configparser.NoOptionError):
@@ -389,6 +399,11 @@ def get_cloud_config(cfg):
         config.cloud_regular_storage_class = cfg.get('s3 cloud', "storage_class")
     except (configparser.NoSectionError, configparser.NoOptionError):
         config.cloud_regular_storage_class = None
 
+    try:
+        config.read_through_restore_days = int(cfg.get('s3 cloud', "read_through_restore_days"))
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.read_through_restore_days = 10
+
 
 def get_client(client_config=None):
@@ -769,6 +784,9 @@ def get_cloud_storage_class():
 def get_cloud_retain_head_object():
     return config.cloud_retain_head_object
 
+def get_allow_read_through():
+    return config.allow_read_through
+
 def get_cloud_regular_storage_class():
     return config.cloud_regular_storage_class
 
@@ -780,3 +798,9 @@ def get_cloud_target_storage_class():
 
 def get_lc_debug_interval():
     return config.lc_debug_interval
+
+def get_restore_debug_interval():
+    return config.rgw_restore_debug_interval
+
+def get_read_through_days():
+    return config.read_through_restore_days
@@ -78,6 +78,7 @@ from . import (
     get_svc_client,
     get_cloud_storage_class,
     get_cloud_retain_head_object,
+    get_allow_read_through,
     get_cloud_regular_storage_class,
     get_cloud_target_path,
     get_cloud_target_storage_class,
@@ -85,6 +86,8 @@ from . import (
     nuke_prefixed_buckets,
     configured_storage_classes,
     get_lc_debug_interval,
+    get_restore_debug_interval,
+    get_read_through_days,
     )
 
 
@@ -9430,6 +9433,15 @@ def verify_object(client, bucket, key, content=None, sc=None):
         body = _get_body(response)
         assert body == content
 
+def verify_transition(client, bucket, key, sc=None):
+    response = client.head_object(Bucket=bucket, Key=key)
+
+    # Check the StorageClass reported in the HEAD response
+    if 'StorageClass' in response:
+        assert response['StorageClass'] == sc
+    else: # storage class should be STANDARD
+        assert 'STANDARD' == sc
+
 # The test harness for lifecycle is configured to treat days as 10 second intervals.
 @pytest.mark.lifecycle
 @pytest.mark.lifecycle_transition
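verify_transition relies on the S3 convention that a HEAD response omits StorageClass for STANDARD objects, so a missing field is asserted to mean STANDARD.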
@@ -9727,6 +9739,136 @@ def test_lifecycle_cloud_transition_large_obj():
     expire1_key1_str = prefix + keys[1]
     verify_object(cloud_client, target_path, expire1_key1_str, data, target_sc)
 
+@pytest.mark.lifecycle_transition
+@pytest.mark.cloud_transition
+@pytest.mark.cloud_restore
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_restore_object_temporary():
+    cloud_sc = get_cloud_storage_class()
+    if cloud_sc is None:
+        pytest.skip('[s3 cloud] section missing cloud_storage_class')
+
+    bucket = get_new_bucket()
+    client = get_client()
+    key = 'test_restore_temp'
+    data = 'temporary restore data'
+
+    # Put object
+    client.put_object(Bucket=bucket, Key=key, Body=data)
+    verify_object(client, bucket, key, data)
+
+    # Transition object to cloud storage class
+    rules = [{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': cloud_sc}], 'Prefix': '', 'Status': 'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
+
+    lc_interval = get_lc_debug_interval()
+    restore_interval = get_restore_debug_interval()
+    time.sleep(7 * lc_interval)
+
+    # Verify object is transitioned
+    verify_transition(client, bucket, key, cloud_sc)
+
+    # Restore object temporarily
+    client.restore_object(Bucket=bucket, Key=key, RestoreRequest={'Days': 2})
+    time.sleep(2)
+
+    # Verify object is restored temporarily
+    verify_transition(client, bucket, key, cloud_sc)
+    response = client.head_object(Bucket=bucket, Key=key)
+    assert response['ContentLength'] == len(data)
+    time.sleep(2 * (restore_interval + lc_interval))
+
+    # verify object expired
+    response = client.head_object(Bucket=bucket, Key=key)
+    assert response['ContentLength'] == 0
+
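With the sample defaults above (lc_debug_interval = 10, rgw_restore_debug_interval = 100), this test sleeps 7 * 10 = 70 seconds for the transition and 2 * (100 + 10) = 220 seconds for the temporary copy to expire, after which HEAD reports ContentLength 0 for the stub.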
+@pytest.mark.lifecycle_transition
+@pytest.mark.cloud_transition
+@pytest.mark.cloud_restore
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_restore_object_permanent():
+    cloud_sc = get_cloud_storage_class()
+    if cloud_sc is None:
+        pytest.skip('[s3 cloud] section missing cloud_storage_class')
+
+    bucket = get_new_bucket()
+    client = get_client()
+    key = 'test_restore_perm'
+    data = 'permanent restore data'
+
+    # Put object
+    client.put_object(Bucket=bucket, Key=key, Body=data)
+    verify_object(client, bucket, key, data)
+
+    # Transition object to cloud storage class
+    rules = [{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': cloud_sc}], 'Prefix': '', 'Status': 'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
+
+    lc_interval = get_lc_debug_interval()
+    time.sleep(7 * lc_interval)
+
+    # Verify object is transitioned
+    verify_transition(client, bucket, key, cloud_sc)
+
+    # Restore object permanently
+    client.restore_object(Bucket=bucket, Key=key, RestoreRequest={})
+    time.sleep(2)
+
+    # Verify object is restored permanently
+    verify_transition(client, bucket, key, 'STANDARD')
+    response = client.head_object(Bucket=bucket, Key=key)
+    assert response['ContentLength'] == len(data)
+
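Unlike the temporary case, the RestoreRequest here carries no Days, which RGW treats as a permanent in-place restore; the object reports STANDARD again afterwards, which verify_transition checks. AWS itself requires Days on a Glacier restore, presumably one reason for the fails_on_aws marker.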
+@pytest.mark.lifecycle_transition
+@pytest.mark.cloud_transition
+@pytest.mark.cloud_restore
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_read_through():
+    cloud_sc = get_cloud_storage_class()
+    if cloud_sc is None:
+        pytest.skip('[s3 cloud] section missing cloud_storage_class')
+
+    bucket = get_new_bucket()
+    client = get_client()
+    key = 'test_restore_readthrough'
+    data = 'restore data with readthrough'
+
+    # Put object
+    client.put_object(Bucket=bucket, Key=key, Body=data)
+    verify_object(client, bucket, key, data)
+
+    # Transition object to cloud storage class
+    rules = [{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': cloud_sc}], 'Prefix': '', 'Status': 'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
+
+    lc_interval = get_lc_debug_interval()
+    restore_interval = get_read_through_days()
+    time.sleep(7 * lc_interval)
+
+    # Check the storage class after transitioning
+    verify_transition(client, bucket, key, cloud_sc)
+
+    # Restore the object using read_through request
+    allow_readthrough = get_allow_read_through()
+    if allow_readthrough:
+        response = client.get_object(Bucket=bucket, Key=key)
+        time.sleep(2)
+        assert response['ContentLength'] == len(data)
+        time.sleep(2 * (restore_interval + lc_interval))
+        # verify object expired
+        response = client.head_object(Bucket=bucket, Key=key)
+        assert response['ContentLength'] == 0
+
+    else:
+        with assert_raises(ClientError) as e:
+            response = client.get_object(Bucket=bucket, Key=key)
+        assert e.exception.response['Error']['Code'] == '403'
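The read-through path issues no restore_object call at all: with allow_read_through enabled, the GET itself triggers the restore (note that restore_interval here is read_through_restore_days from the [s3 cloud] section, not rgw_restore_debug_interval), while with it disabled the GET on a transitioned object is expected to fail with a 403.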
+
 @pytest.mark.encryption
 @pytest.mark.fails_on_dbstore
 def test_encrypted_transfer_1b():