Add test cases for S3 blobovnicza and fstree loss
This commit is contained in:
parent
2d174831ab
commit
520f9fe5b5
1 changed file with 87 additions and 1 deletion
|
@ -8,7 +8,7 @@ from frostfs_testlib.analytics import test_case
|
||||||
from frostfs_testlib.hosting import Host
|
from frostfs_testlib.hosting import Host
|
||||||
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
|
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
|
||||||
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
|
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
|
||||||
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
|
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
|
||||||
from frostfs_testlib.shell import CommandOptions, Shell
|
from frostfs_testlib.shell import CommandOptions, Shell
|
||||||
from frostfs_testlib.steps.cli.container import create_container
|
from frostfs_testlib.steps.cli.container import create_container
|
||||||
from frostfs_testlib.steps.cli.object import get_object, put_object_to_random_node
|
from frostfs_testlib.steps.cli.object import get_object, put_object_to_random_node
|
||||||
|
@ -359,3 +359,89 @@ class TestEmptyMap(ClusterTestBase):
|
||||||
self.tick_epochs(1)
|
self.tick_epochs(1)
|
||||||
check_node_in_map(node, shell=self.shell, alive_node=node)
|
check_node_in_map(node, shell=self.shell, alive_node=node)
|
||||||
stopped_nodes.remove(node)
|
stopped_nodes.remove(node)
|
||||||
|
|
||||||
|
@allure.title("Test S3 Object loss from fstree/blobovnicza, versioning is enabled")
def test_s3_fstree_blobovnicza_loss_versioning_on(
    self, s3_client: S3ClientWrapper, simple_object_size
):
    """Verify S3 object/bucket cleanup after losing fstree and blobovnicza on every node.

    Scenario (versioning enabled): put a versioned object, stop all storage
    nodes, wipe blobovnicza and fstree storage on each of them, restart the
    nodes, then delete every object version (delete marker included) and the
    bucket itself.
    """
    bucket = s3_client.create_bucket()
    s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)

    payload_path = generate_file(simple_object_size)
    object_key = s3_helper.object_key_from_file_path(payload_path)

    # Version ids accumulated here are removed one by one at the end.
    tracked_versions = []
    with allure.step("Put object into one bucket"):
        initial_version = s3_client.put_object(bucket, payload_path)
        s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[object_key])
        tracked_versions.append(initial_version)

    with allure.step("Stop all storage nodes"):
        for node in self.cluster.storage_nodes:
            with allure.step(f"Stop storage service on node: {node}"):
                node.stop_service()
                # NOTE(review): `stopped_nodes` looks like a module-level
                # bookkeeping list shared with teardown — confirm.
                stopped_nodes.append(node)

    with allure.step("Delete blobovnicza and fstree from all nodes"):
        for node in self.cluster.storage_nodes:
            node.delete_blobovnicza()
            node.delete_fstree()

    with allure.step("Start all storage nodes"):
        # Iterate over a snapshot because nodes are removed from the
        # shared list as soon as they are started again.
        for node in list(stopped_nodes):
            with allure.step(f"Start node {node}"):
                node.start_service()
                stopped_nodes.remove(node)
            with allure.step(f"Waiting status ready for node {node}"):
                wait_for_node_to_be_ready(node)

    # need to get Delete Marker first
    with allure.step("Delete the object from the bucket"):
        removal_response = s3_client.delete_object(bucket, object_key)
        tracked_versions.append(removal_response["VersionId"])

    # and now delete all versions of object (including Delete Markers)
    with allure.step("Delete all versions of the object from the bucket"):
        for version_id in tracked_versions:
            removal_response = s3_client.delete_object(bucket, object_key, version_id=version_id)

    with allure.step("Delete bucket"):
        s3_client.delete_bucket(bucket)
||||||
|
@allure.title("Test S3 Object loss from fstree/blobovnicza, versioning is disabled")
def test_s3_fstree_blobovnicza_loss_versioning_off(
    self, s3_client: S3ClientWrapper, simple_object_size
):
    """Verify S3 object/bucket cleanup after losing fstree and blobovnicza on every node.

    Scenario (versioning disabled): put an object, stop all storage nodes,
    wipe blobovnicza and fstree storage on each of them, restart the nodes,
    then delete the object and the bucket.
    """
    bucket = s3_client.create_bucket()

    payload_path = generate_file(simple_object_size)
    object_key = s3_helper.object_key_from_file_path(payload_path)

    with allure.step("Put object into one bucket"):
        s3_client.put_object(bucket, payload_path)
        s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[object_key])

    with allure.step("Stop all storage nodes"):
        for node in self.cluster.storage_nodes:
            with allure.step(f"Stop storage service on node: {node}"):
                node.stop_service()
                # NOTE(review): `stopped_nodes` looks like a module-level
                # bookkeeping list shared with teardown — confirm.
                stopped_nodes.append(node)

    with allure.step("Delete blobovnicza and fstree from all nodes"):
        for node in self.cluster.storage_nodes:
            node.delete_blobovnicza()
            node.delete_fstree()

    with allure.step("Start all storage nodes"):
        # Iterate over a snapshot because nodes are removed from the
        # shared list as soon as they are started again.
        for node in list(stopped_nodes):
            with allure.step(f"Start node {node}"):
                node.start_service()
                stopped_nodes.remove(node)
            with allure.step(f"Waiting status ready for node {node}"):
                wait_for_node_to_be_ready(node)

    with allure.step("Delete the object from the bucket"):
        s3_client.delete_object(bucket, object_key)

    with allure.step("Delete bucket"):
        s3_client.delete_bucket(bucket)
Loading…
Reference in a new issue