2022-10-04 08:22:04 +00:00
|
|
|
import allure
|
|
|
|
import pytest
|
2023-11-29 13:34:59 +00:00
|
|
|
from frostfs_testlib import reporter
|
2023-10-31 14:51:09 +00:00
|
|
|
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
|
2024-09-02 10:25:13 +00:00
|
|
|
from frostfs_testlib.s3.interfaces import BucketContainerResolver
|
|
|
|
from frostfs_testlib.steps.cli.container import list_objects
|
2023-05-15 09:59:33 +00:00
|
|
|
from frostfs_testlib.steps.s3 import s3_helper
|
2023-08-02 11:54:03 +00:00
|
|
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
2024-03-11 16:34:54 +00:00
|
|
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
2023-05-15 09:59:33 +00:00
|
|
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
2024-06-28 12:15:25 +00:00
|
|
|
from frostfs_testlib.testing.test_control import wait_for_success
|
2023-05-15 09:59:33 +00:00
|
|
|
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash, split_file
|
2022-10-04 08:22:04 +00:00
|
|
|
|
|
|
|
# 5 MiB — the minimum part size accepted by the S3 multipart upload API;
# also used as the size of each generated test part.
PART_SIZE = 5 * 1024 * 1024
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.s3_gate
@pytest.mark.s3_gate_multipart
class TestS3GateMultipart(ClusterTestBase):
    """Integration tests for the S3 gateway multipart upload API.

    Covers the full happy path (create / upload parts / complete / get / delete),
    aborting an upload, and assembling an object with UploadPartCopy.
    """

    # Substring of the error message the gateway returns for operations on an
    # upload id that no longer exists (aborted or completed).
    NO_SUCH_UPLOAD = "The upload ID may be invalid, or the upload may have been aborted or completed."

    def _resolve_container_id(self, bucket_container_resolver: BucketContainerResolver, bucket: str) -> str:
        """Resolve the storage container backing *bucket* by querying each cluster node.

        Returns the first non-empty container id. Fails the test explicitly when no
        node can resolve the bucket, instead of leaving the variable unbound and
        surfacing a confusing NameError later.
        """
        for cluster_node in self.cluster.cluster_nodes:
            container_id = bucket_container_resolver.resolve(cluster_node, bucket)
            if container_id:
                return container_id
        raise AssertionError(f"Could not resolve container id for bucket {bucket}")

    @allure.title("Object Multipart API (s3_client={s3_client}, bucket versioning = {versioning_status})")
    @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED, VersioningStatus.UNDEFINED], indirect=True)
    def test_s3_object_multipart(
        self,
        s3_client: S3ClientWrapper,
        bucket: str,
        default_wallet: WalletInfo,
        # Annotation fixed: the fixture yields a VersioningStatus enum member
        # (it is parametrized with enum values and compared to one below).
        versioning_status: VersioningStatus,
        bucket_container_resolver: BucketContainerResolver,
    ):
        """Upload an object in parts, complete the upload, then verify content and deletion."""
        parts_count = 5
        file_name_large = generate_file(PART_SIZE * parts_count)  # each part is 5 MiB — the S3 minimum part size
        object_key = s3_helper.object_key_from_file_path(file_name_large)
        part_files = split_file(file_name_large, parts_count)
        parts = []

        with reporter.step("Get related container_id for bucket"):
            container_id = self._resolve_container_id(bucket_container_resolver, bucket)

        with reporter.step("Upload first part"):
            upload_id = s3_client.create_multipart_upload(bucket, object_key)
            uploads = s3_client.list_multipart_uploads(bucket)
            # The freshly created upload must be visible in the listing
            # (previously the listing was fetched but never checked).
            assert upload_id in [upload.get("UploadId") for upload in uploads], f"Expected upload {upload_id} in {uploads}"
            etag = s3_client.upload_part(bucket, object_key, upload_id, 1, part_files[0])
            parts.append((1, etag))
            got_parts = s3_client.list_parts(bucket, object_key, upload_id)
            assert len(got_parts) == 1, f"Expected 1 part, got\n{got_parts}"

        with reporter.step("Upload last parts"):
            for part_id, file_path in enumerate(part_files[1:], start=2):
                etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
                parts.append((part_id, etag))

        with reporter.step("Check all parts are visible in bucket"):
            got_parts = s3_client.list_parts(bucket, object_key, upload_id)
            assert len(got_parts) == len(part_files), f"Expected {parts_count} parts, got\n{got_parts}"

        with reporter.step("Complete multipart upload"):
            response = s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
            version_id = None
            if versioning_status == VersioningStatus.ENABLED:
                version_id = response["VersionId"]

        with reporter.step("There should be no multipart uploads"):
            uploads = s3_client.list_multipart_uploads(bucket)
            assert not uploads, f"Expected there is no uploads in bucket {bucket}"

        with reporter.step("Check we can get whole object from bucket"):
            got_object = s3_client.get_object(bucket, object_key)
            assert get_file_hash(got_object) == get_file_hash(file_name_large)

        with reporter.step("Delete the object"):
            # version_id is None for an unversioned bucket, which deletes the object itself.
            s3_client.delete_object(bucket, object_key, version_id)

        with reporter.step("There should be no objects in bucket"):
            objects_list = s3_client.list_objects(bucket)
            assert not objects_list, f"Expected empty bucket, got {objects_list}"

        with reporter.step("There should be no objects in container"):
            objects = list_objects(default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint)
            assert len(objects) == 0, f"Expected no objects in container, got\n{objects}"

    @allure.title("Abort Multipart Upload (s3_client={s3_client})")
    @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
    def test_s3_abort_multipart(
        self,
        s3_client: S3ClientWrapper,
        default_wallet: WalletInfo,
        bucket: str,
        simple_object_size: ObjectSize,
        complex_object_size: ObjectSize,
        bucket_container_resolver: BucketContainerResolver,
    ):
        """Abort a multipart upload and verify its parts disappear from both bucket and container."""
        complex_file = generate_file(complex_object_size.value)
        simple_file = generate_file(simple_object_size.value)
        to_upload = [complex_file, complex_file, simple_file]
        files_count = len(to_upload)
        upload_key = "multipart_abort"

        with reporter.step("Get related container_id for bucket"):
            container_id = self._resolve_container_id(bucket_container_resolver, bucket)

        with reporter.step("Create multipart upload"):
            upload_id = s3_client.create_multipart_upload(bucket, upload_key)

        with reporter.step(f"Upload {files_count} parts to multipart upload"):
            for i, file in enumerate(to_upload, 1):
                s3_client.upload_part(bucket, upload_key, upload_id, i, file)

        with reporter.step(f"There should be {files_count} objects in bucket"):
            parts = s3_client.list_parts(bucket, upload_key, upload_id)
            assert len(parts) == files_count, f"Expected {files_count} parts, got\n{parts}"

        with reporter.step(f"There should be {files_count} objects in container"):
            objects = list_objects(default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint)
            assert len(objects) == files_count, f"Expected {files_count} objects in container, got\n{objects}"

        with reporter.step("Abort multipart upload"):
            s3_client.abort_multipart_upload(bucket, upload_key, upload_id)
            uploads = s3_client.list_multipart_uploads(bucket)
            assert not uploads, f"Expected no uploads in bucket {bucket}"

        with reporter.step("There should be no objects in bucket"):
            # Listing parts of an aborted upload must fail with NO_SUCH_UPLOAD.
            with pytest.raises(Exception, match=self.NO_SUCH_UPLOAD):
                s3_client.list_parts(bucket, upload_key, upload_id)

        with reporter.step("There should be no objects in container"):
            # Cleanup of aborted parts is asynchronous on the storage side —
            # poll for up to 120 s at 10 s intervals before declaring failure.
            @wait_for_success(120, 10)
            def check_no_objects():
                objects = list_objects(default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint)
                assert len(objects) == 0, f"Expected no objects in container, got\n{objects}"

            check_no_objects()

    @allure.title("Upload Part Copy (s3_client={s3_client})")
    @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
    def test_s3_multipart_copy(self, s3_client: S3ClientWrapper, bucket: str):
        """Assemble a multipart object from objects already stored in the bucket via UploadPartCopy."""
        parts_count = 3
        file_name_large = generate_file(PART_SIZE * parts_count)  # each part is 5 MiB — the S3 minimum part size
        object_key = s3_helper.object_key_from_file_path(file_name_large)
        part_files = split_file(file_name_large, parts_count)
        parts = []
        objs = []

        with reporter.step(f"Put {parts_count} objects in bucket"):
            for part in part_files:
                s3_client.put_object(bucket, part)
                objs.append(s3_helper.object_key_from_file_path(part))
            s3_helper.check_objects_in_bucket(s3_client, bucket, objs)

        with reporter.step("Create multipart upload object"):
            upload_id = s3_client.create_multipart_upload(bucket, object_key)
            uploads = s3_client.list_multipart_uploads(bucket)
            assert len(uploads) == 1, f"Expected one upload in bucket {bucket}"
            assert uploads[0].get("Key") == object_key, f"Expected correct key {object_key} in upload {uploads}"
            assert uploads[0].get("UploadId") == upload_id, f"Expected correct UploadId {upload_id} in upload {uploads}"

        with reporter.step("Upload parts to multipart upload"):
            for part_id, obj_key in enumerate(objs, start=1):
                etag = s3_client.upload_part_copy(bucket, object_key, upload_id, part_id, f"{bucket}/{obj_key}")
                parts.append((part_id, etag))
            got_parts = s3_client.list_parts(bucket, object_key, upload_id)
            # Check the part listing while the upload is still in progress —
            # the upload id becomes invalid once the upload is completed.
            assert len(got_parts) == len(part_files), f"Expected {parts_count} parts, got\n{got_parts}"

        with reporter.step("Complete multipart upload"):
            s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)

        with reporter.step("Get whole object from bucket"):
            got_object = s3_client.get_object(bucket, object_key)
            assert get_file_hash(got_object) == get_file_hash(file_name_large)
|