forked from TrueCloudLab/frostfs-testcases
Add case for loss of pilorama on one node
This commit is contained in:
parent c9bfba136d
commit 737c544fc0
1 changed file with 98 additions and 1 deletion
@@ -518,6 +518,17 @@ class TestEmptyMap(ClusterTestBase):

@pytest.mark.failover
@pytest.mark.failover_data_loss
class TestStorageDataLoss(ClusterTestBase):

    @allure.step("Get list of all piloramas on node")
    def get_piloramas_list(self, node, cluster_state_controller) -> list:
        # List every pilorama database across the node's metabase directories.
        data_directory_path = cluster_state_controller.get_data_directory()

        cmd = f"sudo ls -1 {data_directory_path}/meta*/pilorama*"
        shell = cluster_state_controller.host.get_shell()
        stdout = shell.exec(cmd).stdout

        piloramas = stdout.split("\n")
        return piloramas

    @allure.title(
        "After metabase loss on all nodes operations on objects and buckets should be still available via S3"
    )
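In FrostFS, pilorama.db holds the per-shard tree data that the S3 layer relies on for bucket listings and versioning, so the helper above simply globs every such file under the node's data directory. Note that stdout.split("\n") keeps a trailing empty entry when the command output ends with a newline; the test below compares two lists produced the same way, so its assertion is unaffected, but a slightly more defensive parse is sketched here (an optional refinement, not part of this commit):

    # Optional refinement (not part of the commit): drop empty entries and sort,
    # so the comparison does not depend on a trailing newline or listing order.
    piloramas = sorted(line for line in stdout.split("\n") if line.strip())
    return piloramas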
@@ -695,4 +706,90 @@ class TestStorageDataLoss(ClusterTestBase):
        file_name = s3_helper.object_key_from_file_path(file_path)
        with allure.step("Put object into one bucket"):
            put_object = s3_client.put_object(bucket, file_path)
            s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])

    @allure.title("After pilorama.db loss on one node objects are retrievable")
    def test_s3_one_pilorama_loss(
        self,
        s3_client: S3ClientWrapper,
        simple_object_size: int,
        cluster_state_controller: ClusterStateController,
    ):
        bucket = s3_client.create_bucket(
            location_constraint="load-1-4",
            grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers",
        )
        s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)

        with allure.step("Check bucket versioning"):
            bucket_versioning = s3_client.get_bucket_versioning_status(bucket)
            assert bucket_versioning == "Enabled", "Bucket should have versioning enabled"

        file_path = generate_file(simple_object_size)
        file_name = s3_helper.object_key_from_file_path(file_path)

        object_versions = []
        with allure.step("Put object into one bucket"):
            put_object = s3_client.put_object(bucket, file_path)
            s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
            object_versions.append(put_object)

        node_to_check = self.cluster.storage_nodes[0]
        piloramas_list_before_removing = []
        with allure.step("Get list of all pilorama.db"):
            piloramas_list_before_removing = self.get_piloramas_list(
                node_to_check, cluster_state_controller
            )

        with allure.step("Stop all storage nodes"):
            for node in self.cluster.cluster_nodes:
                with allure.step(f"Stop storage service on node: {node}"):
                    cluster_state_controller.stop_storage_service(node)

        with allure.step("Delete pilorama.db from one node"):
            node_to_check.delete_pilorama()

        with allure.step("Start all storage nodes"):
            cluster_state_controller.start_stopped_storage_services()

        with allure.step("Tick epoch to trigger sync and then wait for 2 minutes"):
            self.tick_epochs(1)
            sleep(120)

        piloramas_list_after_removing = []
        with allure.step("Get list of all pilorama.db after sync"):
            piloramas_list_after_removing = self.get_piloramas_list(
                node_to_check, cluster_state_controller
            )
        assert (
            piloramas_list_after_removing == piloramas_list_before_removing
        ), "List of pilorama.db is different"

        with allure.step("Check bucket versioning"):
            bucket_versioning = s3_client.get_bucket_versioning_status(bucket)
            assert bucket_versioning == "Enabled", "Bucket should have versioning enabled"

        with allure.step("Check list objects"):
            objects_list = s3_client.list_objects(bucket)
            assert objects_list, "Expected non-empty bucket"

        with allure.step("Delete the object from the bucket"):
            delete_object = s3_client.delete_object(bucket, file_name)
            assert "DeleteMarker" in delete_object.keys(), "Delete marker not found"

        with allure.step("Check list objects"):
            objects_list = s3_client.list_objects_versions(bucket)
            assert objects_list, "Expected non-empty bucket"
            object_versions.append(delete_object["VersionId"])

        # Now delete all versions of the object (including the delete marker)
        with allure.step("Delete all versions of the object from the bucket"):
            for version in object_versions:
                delete_object = s3_client.delete_object(bucket, file_name, version_id=version)

        with allure.step("Check list objects"):
            objects_list = s3_client.list_objects_versions(bucket)
            assert not objects_list, "Expected empty bucket"

        with allure.step("Delete bucket"):
            s3_client.delete_bucket(bucket)
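The new case is selected by the failover and failover_data_loss markers declared on the class. A minimal sketch of running only this test, assuming the repository's usual pytest setup (registered markers, cluster and S3 fixtures) is available; the extra flags are illustrative:

    import sys

    import pytest

    if __name__ == "__main__":
        # Run only the new pilorama data-loss case, verbosely and without output capture.
        sys.exit(pytest.main(["-m", "failover_data_loss", "-k", "test_s3_one_pilorama_loss", "-v", "-s"]))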