import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.steps.cli.container import list_objects
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash, split_file

PART_SIZE = 5 * 1024 * 1024  # 5 MiB, the minimum S3 part size


@pytest.mark.nightly
@pytest.mark.s3_gate
@pytest.mark.s3_gate_multipart
class TestS3GateMultipart(ClusterTestBase):
    NO_SUCH_UPLOAD = "The upload ID may be invalid, or the upload may have been aborted or completed."

    @allure.title("Object Multipart API (s3_client={s3_client}, bucket versioning = {versioning_status})")
    @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED, VersioningStatus.UNDEFINED], indirect=True)
    def test_s3_object_multipart(
        self,
        s3_client: S3ClientWrapper,
        bucket: str,
        default_wallet: WalletInfo,
        versioning_status: VersioningStatus,
        bucket_container_resolver: BucketContainerResolver,
    ):
        parts_count = 5
        file_name_large = generate_file(PART_SIZE * parts_count)  # each part is PART_SIZE, the 5 MiB minimum
        object_key = s3_helper.object_key_from_file_path(file_name_large)
        part_files = split_file(file_name_large, parts_count)
        parts = []

        with reporter.step("Get related container_id for bucket"):
            for cluster_node in self.cluster.cluster_nodes:
                container_id = bucket_container_resolver.resolve(cluster_node, bucket)
                if container_id:
                    break

        with reporter.step("Upload first part"):
            upload_id = s3_client.create_multipart_upload(bucket, object_key)
            uploads = s3_client.list_multipart_uploads(bucket)
            assert uploads, f"Expected an active upload in bucket {bucket}, got\n{uploads}"
            etag = s3_client.upload_part(bucket, object_key, upload_id, 1, part_files[0])
            parts.append((1, etag))
            got_parts = s3_client.list_parts(bucket, object_key, upload_id)
            assert len(got_parts) == 1, f"Expected 1 part, got\n{got_parts}"

        with reporter.step("Upload last parts"):
            for part_id, file_path in enumerate(part_files[1:], start=2):
                etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
                parts.append((part_id, etag))

        with reporter.step("Check all parts are visible in bucket"):
            got_parts = s3_client.list_parts(bucket, object_key, upload_id)
            assert len(got_parts) == len(part_files), f"Expected {parts_count} parts, got\n{got_parts}"

        with reporter.step("Complete multipart upload"):
            response = s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
            version_id = None
            if versioning_status == VersioningStatus.ENABLED:
                version_id = response["VersionId"]

        with reporter.step("There should be no multipart uploads"):
            uploads = s3_client.list_multipart_uploads(bucket)
            assert not uploads, f"Expected no uploads in bucket {bucket}"

        with reporter.step("Check we can get whole object from bucket"):
            got_object = s3_client.get_object(bucket, object_key)
            assert get_file_hash(got_object) == get_file_hash(file_name_large)

        with reporter.step("Delete the object"):
            s3_client.delete_object(bucket, object_key, version_id)

        with reporter.step("There should be no objects in bucket"):
            objects_list = s3_client.list_objects(bucket)
            assert not objects_list, f"Expected empty bucket, got {objects_list}"

        with reporter.step("There should be no objects in container"):
            objects = list_objects(default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint)
            assert len(objects) == 0, f"Expected no objects in container, got\n{objects}"
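
    # Aborting a multipart upload should clean up end to end: list_parts must
    # fail with NoSuchUpload, and the already-uploaded parts must eventually
    # disappear from the backing FrostFS container as well (checked below with
    # a wait_for_success retry, since container cleanup is not instantaneous).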
    @allure.title("Abort Multipart Upload (s3_client={s3_client})")
    @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
    def test_s3_abort_multipart(
        self,
        s3_client: S3ClientWrapper,
        default_wallet: WalletInfo,
        bucket: str,
        simple_object_size: ObjectSize,
        complex_object_size: ObjectSize,
        bucket_container_resolver: BucketContainerResolver,
    ):
        complex_file = generate_file(complex_object_size.value)
        simple_file = generate_file(simple_object_size.value)
        to_upload = [complex_file, complex_file, simple_file]
        files_count = len(to_upload)
        upload_key = "multipart_abort"

        with reporter.step("Get related container_id for bucket"):
            for cluster_node in self.cluster.cluster_nodes:
                container_id = bucket_container_resolver.resolve(cluster_node, bucket)
                if container_id:
                    break

        with reporter.step("Create multipart upload"):
            upload_id = s3_client.create_multipart_upload(bucket, upload_key)

        with reporter.step(f"Upload {files_count} parts to multipart upload"):
            for i, file in enumerate(to_upload, 1):
                s3_client.upload_part(bucket, upload_key, upload_id, i, file)

        with reporter.step(f"There should be {files_count} parts in the upload"):
            parts = s3_client.list_parts(bucket, upload_key, upload_id)
            assert len(parts) == files_count, f"Expected {files_count} parts, got\n{parts}"

        with reporter.step(f"There should be {files_count} objects in container"):
            objects = list_objects(default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint)
            assert len(objects) == files_count, f"Expected {files_count} objects in container, got\n{objects}"

        with reporter.step("Abort multipart upload"):
            s3_client.abort_multipart_upload(bucket, upload_key, upload_id)
            uploads = s3_client.list_multipart_uploads(bucket)
            assert not uploads, f"Expected no uploads in bucket {bucket}"

        with reporter.step("There should be no parts in the aborted upload"):
            with pytest.raises(Exception, match=self.NO_SUCH_UPLOAD):
                s3_client.list_parts(bucket, upload_key, upload_id)

        with reporter.step("There should be no objects in container"):

            @wait_for_success(120, 10)
            def check_no_objects():
                objects = list_objects(default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint)
                assert len(objects) == 0, f"Expected no objects in container, got\n{objects}"

            check_no_objects()

    @allure.title("Upload Part Copy (s3_client={s3_client})")
    @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
    def test_s3_multipart_copy(self, s3_client: S3ClientWrapper, bucket: str):
        parts_count = 3
        file_name_large = generate_file(PART_SIZE * parts_count)  # each part is PART_SIZE, the 5 MiB minimum
        object_key = s3_helper.object_key_from_file_path(file_name_large)
        part_files = split_file(file_name_large, parts_count)
        parts = []
        objs = []

        with reporter.step(f"Put {parts_count} objects in bucket"):
            for part in part_files:
                s3_client.put_object(bucket, part)
                objs.append(s3_helper.object_key_from_file_path(part))
            s3_helper.check_objects_in_bucket(s3_client, bucket, objs)

        with reporter.step("Create multipart upload object"):
            upload_id = s3_client.create_multipart_upload(bucket, object_key)
            uploads = s3_client.list_multipart_uploads(bucket)
            assert len(uploads) == 1, f"Expected one upload in bucket {bucket}"
            assert uploads[0].get("Key") == object_key, f"Expected correct key {object_key} in upload {uploads}"
            assert uploads[0].get("UploadId") == upload_id, f"Expected correct UploadId {upload_id} in upload {uploads}"
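
        # upload_part_copy sources each part from an object already stored in
        # the bucket, passing the copy source as "<bucket>/<key>"; the gateway
        # copies the data server side, so no part payload passes through the client.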
        with reporter.step("Upload parts to multipart upload"):
            for part_id, obj_key in enumerate(objs, start=1):
                etag = s3_client.upload_part_copy(bucket, object_key, upload_id, part_id, f"{bucket}/{obj_key}")
                parts.append((part_id, etag))
            got_parts = s3_client.list_parts(bucket, object_key, upload_id)
            assert len(got_parts) == len(part_files), f"Expected {parts_count} parts, got\n{got_parts}"

        with reporter.step("Complete multipart upload"):
            s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)

        with reporter.step("Get whole object from bucket"):
            got_object = s3_client.get_object(bucket, object_key)
            assert get_file_hash(got_object) == get_file_hash(file_name_large)
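
# The tests above are selected via the pytest markers declared on the class;
# a typical invocation (exact flags depend on the project's pytest setup) is:
#   pytest -m s3_gate_multipart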