diff --git a/pytest_tests/testsuites/conftest.py b/pytest_tests/testsuites/conftest.py
index 6e778dfa..28747b92 100644
--- a/pytest_tests/testsuites/conftest.py
+++ b/pytest_tests/testsuites/conftest.py
@@ -15,6 +15,7 @@ from frostfs_testlib.resources import optionals
 from frostfs_testlib.resources.common import COMPLEX_OBJECT_CHUNKS_COUNT, COMPLEX_OBJECT_TAIL_SIZE, SIMPLE_OBJECT_SIZE
 from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
 from frostfs_testlib.s3.interfaces import BucketContainerResolver
+from frostfs_testlib.s3.s3_http_client import S3HttpClient
 from frostfs_testlib.shell import LocalShell, Shell
 from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE, FROSTFS_CLI_EXEC
 from frostfs_testlib.steps.cli.object import get_netmap_netinfo
@@ -301,6 +302,20 @@ def s3_client(
     return client
 
 
+@allure.title("[Session] Create S3 http client")
+@pytest.fixture(scope="session")
+def s3_http_client(
+    default_user: User, s3_policy: Optional[str], cluster: Cluster, credentials_provider: CredentialsProvider
+) -> S3HttpClient:
+    node = cluster.cluster_nodes[0]
+    credentials_provider.S3.provide(default_user, node, s3_policy)
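+    # Assumption: S3HttpClient is a low-level HTTP client that signs raw
+    # requests (e.g. PATCH, which the high-level wrappers do not expose)
+    # with the user's S3 credentials.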
+    return S3HttpClient(
+        cluster.default_s3_gate_endpoint,
+        default_user.s3_credentials.access_key,
+        default_user.s3_credentials.secret_key,
+    )
+
+
 @pytest.fixture
 def versioning_status(request: pytest.FixtureRequest) -> VersioningStatus:
     if "param" in request.__dict__:
@@ -443,6 +458,14 @@ def default_user(credentials_provider: CredentialsProvider, cluster: Cluster) ->
     return user
 
 
+@pytest.fixture(scope="session")
+@cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
+def users_pool(credentials_provider: CredentialsProvider, cluster: Cluster) -> list[User]:
+    users = [User(string_utils.unique_name("user-")) for _ in range(WALLTETS_IN_POOL)]
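+    # Credentials for the whole pool are issued in parallel below; like
+    # default_user, the pool may be cached between runs via OPTIONAL_CACHE_FIXTURES.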
"Effect": "Allow", + "Principal": "*", + "Action": ["s3:PatchObject"], + "Resource": [f"arn:aws:s3:::{bucket}/*"], + }, + ], + }, + ) + + +def pytest_generate_tests(metafunc: pytest.Metafunc) -> None: + if "s3_client" not in metafunc.fixturenames: + return + metafunc.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], ids=["s3policy"], indirect=True) + + +@pytest.fixture(scope="session", params=[pytest.param("rep3", marks=pytest.mark.rep), pytest.param("ec3.1", marks=pytest.mark.ec)]) +def placement_policy(request: pytest.FixtureRequest) -> PlacementPolicy: + if request.param == "ec3.1": + return PlacementPolicy("ec3.1", "ec3.1") + return PlacementPolicy("rep3", "rep3") + + +@pytest.fixture(scope="session") +def versioning_status(request: pytest.FixtureRequest) -> VersioningStatus: + if "param" in request.__dict__: + return request.param + return VersioningStatus.UNDEFINED + + +@pytest.fixture( + scope="session", + params=[ + pytest.param(AwsCliClient, marks=[pytest.mark.aws, pytest.mark.weekly]), + pytest.param(Boto3ClientWrapper, marks=[pytest.mark.boto3, pytest.mark.nightly]), + ], +) +def s3_client_cls(request: pytest.FixtureRequest) -> AwsCliClient | Boto3ClientWrapper: + return request.param + + +@allure.title("[Class]: Create S3 client") +@pytest.fixture(scope="class") +def s3_client( + users_pool: list[User], + s3_policy: str | None, + cluster: Cluster, + credentials_provider: CredentialsProvider, + s3_client_cls: AwsCliClient | Boto3ClientWrapper, +) -> S3ClientWrapper: + user = users_pool[0] + node = cluster.cluster_nodes[0] + credentials_provider.S3.provide(user, node, s3_policy) + return s3_client_cls(user.s3_credentials.access_key, user.s3_credentials.secret_key, cluster.default_s3_gate_endpoint) + + +@allure.title("[Class] Create bucket") +@pytest.fixture(scope="class") +def bucket(s3_client: S3ClientWrapper, versioning_status: VersioningStatus, placement_policy: PlacementPolicy) -> str: + with reporter.step(f"Create bucket with location constraint {placement_policy.value}"): + bucket = s3_client.create_bucket(location_constraint=placement_policy.value) + + s3_helper.set_bucket_versioning(s3_client, bucket, versioning_status) + allow_patch_for_bucket(s3_client, bucket) + + return bucket + + +@allure.title("[Class]: Create S3 client with another user") +@pytest.fixture(scope="class") +def another_s3_client( + users_pool: list[User], + s3_policy: str | None, + cluster: Cluster, + credentials_provider: CredentialsProvider, + s3_client_cls: AwsCliClient | Boto3ClientWrapper, + # Wait for first bucket to be created under another user + bucket: str, +) -> S3ClientWrapper: + user = users_pool[1] + node = cluster.cluster_nodes[0] + credentials_provider.S3.provide(user, node, s3_policy) + return s3_client_cls(user.s3_credentials.access_key, user.s3_credentials.secret_key, cluster.default_s3_gate_endpoint) + + +@allure.title("[Class] Create bucket under another user") +@pytest.fixture(scope="class") +def another_bucket(another_s3_client: S3ClientWrapper, versioning_status: VersioningStatus, placement_policy: PlacementPolicy) -> str: + with reporter.step(f"Create bucket with location constraint {placement_policy.value}"): + bucket = another_s3_client.create_bucket(location_constraint=placement_policy.value) + + s3_helper.set_bucket_versioning(another_s3_client, bucket, versioning_status) + allow_patch_for_bucket(another_s3_client, bucket) + + return bucket + + +@pytest.fixture(scope="function") +def original_object(s3_client: S3ClientWrapper, bucket: str, test_file: TestFile) -> str: + 
with reporter.step("Put object"): + key = s3_helper.object_key_from_file_path(test_file) + s3_client.put_object(bucket, test_file, key) + return key + + +@pytest.mark.nightly +@pytest.mark.s3_gate +class TestS3GateHttpObject(ClusterTestBase): + @allure.title("Patch simple object payload (range={patch_range}, s3_client={s3_client_cls}, policy={placement_policy})") + @pytest.mark.parametrize("object_size", ["simple"], indirect=True) + @pytest.mark.parametrize( + "patch_range", + # String "object" denotes size of object. + ["0:19", "500:550", "object/2-100:object/2+200", "object-1:object", "object:object", "object:object+123"], + ) + def test_patch_simple_object_payload( + self, + s3_client: S3ClientWrapper, + s3_http_client: S3HttpClient, + bucket: str, + original_object: str, + object_size: ObjectSize, + patch_range: str, + ): + start, end = s3_helper.get_range_relative_to_object(patch_range, object_size.value, int_values=True) + content_size = end - start + 1 + content_range = f"bytes {start}-{end}/*" + + with reporter.step("Generate payload object"): + content_file = generate_file(content_size) + + with reporter.step("Patch simple object"): + s3_http_client.patch_object(bucket, original_object, content_file, content_range) + + with reporter.step("Get patched part of object and make sure it has changed correctly"): + patched_file_part = s3_client.get_object(bucket, original_object, object_range=(start, end)) + assert get_file_hash(patched_file_part) == get_file_hash( + content_file + ), "Expected content hash did not match actual content hash" + + @allure.title("Patch complex object payload (range={patch_range}, s3_client={s3_client_cls}, policy={placement_policy})") + @pytest.mark.parametrize("object_size", ["complex"], indirect=True) + @pytest.mark.parametrize( + "patch_range", + # Strings "object" and "part" denote size of object and its part, respectively. 
+ ["part:part+100", "object-part:object", "0:part", "part*2:part*3", "part-1:part*2", "part+1:part*2-1"], + ) + def test_patch_complex_object_payload( + self, + s3_client: S3ClientWrapper, + s3_http_client: S3HttpClient, + bucket: str, + original_object: str, + object_size: ObjectSize, + max_object_size: int, + patch_range: str, + ): + start, end = s3_helper.get_range_relative_to_object(patch_range, object_size.value, max_object_size, int_values=True) + content_size = end - start + 1 + content_range = f"bytes {start}-{end}/*" + + with reporter.step("Generate payload object"): + content_file = generate_file(content_size) + + with reporter.step("Patch complex object"): + s3_http_client.patch_object(bucket, original_object, content_file, content_range) + + with reporter.step("Get patched part of object and make sure it has changed correctly"): + patched_file_part = s3_client.get_object(bucket, original_object, object_range=(start, end)) + assert get_file_hash(patched_file_part) == get_file_hash( + content_file + ), "Expected content hash did not match actual content hash" + + @allure.title( + "Patch object with fulfilled If-Match condition (s3_client={s3_client_cls}, object_size={object_size}, policy={placement_policy})" + ) + def test_patch_with_fulfilled_if_match_contidion( + self, + s3_client: S3ClientWrapper, + s3_http_client: S3HttpClient, + bucket: str, + original_object: str, + ): + start, end = 100, 199 + content_size = end - start + 1 + content_range = f"bytes {start}-{end}/*" + + with reporter.step("Generate payload object"): + content_file = generate_file(content_size) + expected_hash = get_file_hash(content_file) + + with reporter.step("Get object ETag attribute"): + object_info = s3_client.head_object(bucket, original_object) + etag = object_info["ETag"] + + with reporter.step("Patch object with If-Match header"): + s3_http_client.patch_object(bucket, original_object, content_file, content_range, if_match=etag) + + with reporter.step("Get patched object and make sure it has changed correctly"): + patched_file = s3_client.get_object(bucket, original_object) + patched_hash = get_file_hash(patched_file, offset=start, len=content_size) + assert patched_hash == expected_hash, "Expected content hash did not match actual content hash" + + @allure.title( + "[NEGATIVE] Patch cannot be applied with failed If-Match condition " + "(s3_client={s3_client_cls}, object_size={object_size}, policy={placement_policy})" + ) + def test_patch_with_failed_if_match_condition(self, s3_http_client: S3HttpClient, bucket: str, original_object: str): + with reporter.step("Try patch object with If-Match header and get exception"): + with pytest.raises(Exception, match="PreconditionFailed"): + s3_http_client.patch_object(bucket, original_object, "content", "bytes 0-6/*", if_match="nonexistentetag") + + @allure.title( + "Patch object with fulfilled If-Unmodified-Since condition " + "(s3_client={s3_client_cls}, object_size={object_size}, policy={placement_policy})" + ) + def test_patch_with_fulfilled_if_unmodified_since_condition( + self, + s3_client: S3ClientWrapper, + s3_http_client: S3HttpClient, + bucket: str, + original_object: str, + ): + start, end = 235, 341 + content_size = end - start + 1 + content_range = f"bytes {start}-{end}/*" + + with reporter.step("Generate payload object"): + content_file = generate_file(content_size) + expected_hash = get_file_hash(content_file) + + with reporter.step("Get object LastModified attribute"): + response = s3_client.head_object(bucket, original_object) + if 
isinstance(response["LastModified"], str): + response["LastModified"] = datetime.fromisoformat(response["LastModified"]) + + # Convert datetime to RFC 7232 format + last_modified = formatdate(response["LastModified"].timestamp(), localtime=False, usegmt=True) + + with reporter.step("Patch object with If-Unmodified-Since header"): + s3_http_client.patch_object(bucket, original_object, content_file, content_range, if_unmodified_since=last_modified) + + with reporter.step("Get patched object and make sure it has changed correctly"): + patched_file = s3_client.get_object(bucket, original_object) + patched_hash = get_file_hash(patched_file, offset=start, len=content_size) + assert patched_hash == expected_hash, "Expected content hash did not match actual content hash" + + @allure.title( + "[NEGATIVE] Patch cannot be applied with failed If-Unmodified-Since condition " + "(s3_client={s3_client_cls}, object_size={object_size}, policy={placement_policy})" + ) + def test_patch_with_failed_if_unmodified_since_condition( + self, + s3_client: S3ClientWrapper, + s3_http_client: S3HttpClient, + bucket: str, + original_object: str, + test_file: TestFile, + ): + with reporter.step("Get original object LastModified attribute"): + response = s3_client.head_object(bucket, original_object) + if isinstance(response["LastModified"], str): + response["LastModified"] = datetime.fromisoformat(response["LastModified"]) + + # Convert datetime to RFC 7232 format + previous_last_modified = formatdate(response["LastModified"].timestamp(), localtime=False, usegmt=True) + + with reporter.step("Wait two seconds for LastModified to update"): + # Next PUT for a simple object occurs at the same second the object was initially loaded, + # so the LastModified attribute "as if" does not change after the operation. 
+
+        with reporter.step("Patch object with If-Unmodified-Since header"):
+            s3_http_client.patch_object(bucket, original_object, content_file, content_range, if_unmodified_since=last_modified)
+
+        with reporter.step("Get patched object and make sure it has changed correctly"):
+            patched_file = s3_client.get_object(bucket, original_object)
+            patched_hash = get_file_hash(patched_file, offset=start, len=content_size)
+            assert patched_hash == expected_hash, "Expected content hash did not match actual content hash"
+
+    @allure.title(
+        "[NEGATIVE] Patch cannot be applied with failed If-Unmodified-Since condition "
+        "(s3_client={s3_client_cls}, object_size={object_size}, policy={placement_policy})"
+    )
+    def test_patch_with_failed_if_unmodified_since_condition(
+        self,
+        s3_client: S3ClientWrapper,
+        s3_http_client: S3HttpClient,
+        bucket: str,
+        original_object: str,
+        test_file: TestFile,
+    ):
+        with reporter.step("Get original object LastModified attribute"):
+            response = s3_client.head_object(bucket, original_object)
+            if isinstance(response["LastModified"], str):
+                response["LastModified"] = datetime.fromisoformat(response["LastModified"])
+
+            # Convert datetime to RFC 7232 format
+            previous_last_modified = formatdate(response["LastModified"].timestamp(), localtime=False, usegmt=True)
+
+        with reporter.step("Wait two seconds for LastModified to update"):
+            # Without the wait, the next PUT for a simple object may land within the same
+            # second the object was initially loaded, in which case the LastModified
+            # attribute would appear unchanged after the operation.
+            time.sleep(2)
+
+        with reporter.step("Put new data for existing object"):
+            s3_client.put_object(bucket, test_file, original_object)
+
+        with reporter.step("Get object LastModified attribute with new data and make sure it has changed"):
+            response = s3_client.head_object(bucket, original_object)
+            if isinstance(response["LastModified"], str):
+                response["LastModified"] = datetime.fromisoformat(response["LastModified"])
+
+            # Convert datetime to RFC 7232 format
+            last_modified = formatdate(response["LastModified"].timestamp(), localtime=False, usegmt=True)
+            assert last_modified != previous_last_modified, f"Attribute LastModified was expected to change: {last_modified}"
+
+        with reporter.step("Try patch object with If-Unmodified-Since header and get exception"):
+            with pytest.raises(Exception, match="PreconditionFailed"):
+                s3_http_client.patch_object(bucket, original_object, b"modify", "bytes 0-5/*", if_unmodified_since=previous_last_modified)
+
+    @allure.title(
+        "Patch object with fulfilled x-amz-expected-bucket-owner condition "
+        "(s3_client={s3_client_cls}, object_size={object_size}, policy={placement_policy})"
+    )
+    def test_patch_with_fulfilled_if_expected_bucket_owner_condition(
+        self,
+        s3_client: S3ClientWrapper,
+        s3_http_client: S3HttpClient,
+        bucket: str,
+        original_object: str,
+    ):
+        start, end = 512, 749
+        content_size = end - start + 1
+        content_range = f"bytes {start}-{end}/*"
+
+        with reporter.step("Generate payload object"):
+            content_file = generate_file(content_size)
+            expected_hash = get_file_hash(content_file)
+
+        with reporter.step("Get bucket owner ID"):
+            bucket_acl = s3_client.get_bucket_acl(bucket)
+            expected_bucket_owner = bucket_acl["Owner"]["DisplayName"]
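+            # Assumption: the gateway reports the owner ID in the ACL's
+            # Owner.DisplayName field; AWS itself expects the account ID here.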
+
+        with reporter.step("Patch object with x-amz-expected-bucket-owner header"):
+            s3_http_client.patch_object(
+                bucket,
+                original_object,
+                content_file,
+                content_range,
+                x_amz_expected_bucket_owner=expected_bucket_owner,
+            )
+
+        with reporter.step("Get patched object and make sure it has changed correctly"):
+            patched_file = s3_client.get_object(bucket, original_object)
+            patched_hash = get_file_hash(patched_file, offset=start, len=content_size)
+            assert patched_hash == expected_hash, "Expected content hash did not match actual content hash"
+
+    @allure.title(
+        "[NEGATIVE] Patch cannot be applied with non-existent bucket owner ID in x-amz-expected-bucket-owner header "
+        "(s3_client={s3_client_cls}, object_size={object_size}, policy={placement_policy})"
+    )
+    def test_patch_with_non_existent_bucket_owner_id(
+        self,
+        s3_client: S3ClientWrapper,
+        s3_http_client: S3HttpClient,
+        bucket: str,
+        original_object: str,
+    ):
+        with reporter.step("Get bucket owner ID"):
+            bucket_acl = s3_client.get_bucket_acl(bucket)
+            bucket_owner = bucket_acl["Owner"]["DisplayName"]
+
+        with reporter.step("Change owner ID to non-existent"):
+            unexpected_bucket_owner = list(bucket_owner)
+            random.shuffle(unexpected_bucket_owner)
+            unexpected_bucket_owner = "".join(unexpected_bucket_owner)
+
+        with reporter.step("Try patch object with x-amz-expected-bucket-owner header and get exception"):
+            with pytest.raises(Exception, match="AccessDenied"):
+                s3_http_client.patch_object(
+                    bucket,
+                    original_object,
+                    b"blablabla",
+                    "bytes 10-18/*",
+                    x_amz_expected_bucket_owner=unexpected_bucket_owner,
+                )
+
+    @allure.title(
+        "[NEGATIVE] Patch cannot be applied with another bucket owner ID in x-amz-expected-bucket-owner header "
+        "(s3_client={s3_client_cls}, object_size={object_size}, policy={placement_policy})"
+    )
+    def test_patch_with_another_bucket_owner_id(
+        self,
+        s3_client: S3ClientWrapper,
+        s3_http_client: S3HttpClient,
+        bucket: str,
+        original_object: str,
+        another_bucket: str,
+    ):
+        with reporter.step("Get owner ID of another bucket"):
+            bucket_acl = s3_client.get_bucket_acl(another_bucket)
+            another_bucket_owner = bucket_acl["Owner"]["DisplayName"]
+
+        with reporter.step("Try patch object with x-amz-expected-bucket-owner header and get exception"):
+            with pytest.raises(Exception, match="AccessDenied"):
+                s3_http_client.patch_object(
+                    bucket,
+                    original_object,
+                    b"blablabla",
+                    "bytes 10-18/*",
+                    x_amz_expected_bucket_owner=another_bucket_owner,
+                )
+
+    @allure.title(
+        "[NEGATIVE] Patch cannot be applied with invalid Content-Range header "
+        "(range={patch_range}, s3_client={s3_client_cls}, object_size={object_size}, policy={placement_policy})"
+    )
+    @pytest.mark.parametrize(
+        "patch_range",
+        # The string "object" denotes the size of the object.
+        ["object+100:200", "object+10:object+16", "-1:1", "20:100", "0:2", f"0:{FIVE_GIGABYTES}", "0:0"],
+    )
+    def test_patch_with_invalid_content_range(
+        self,
+        s3_http_client: S3HttpClient,
+        bucket: str,
+        original_object: str,
+        object_size: ObjectSize,
+        patch_range: str,
+    ):
+        content_range = s3_helper.get_range_relative_to_object(patch_range, object_size.value)
+        with reporter.step("Try patch object with invalid Content-Range header and get exception"):
+            with pytest.raises(Exception, match="InvalidRange"):
+                s3_http_client.patch_object(bucket, original_object, b"content", content_range)
+
+    @allure.title(
+        "[NEGATIVE] Patch cannot be applied without Content-Range header "
+        "(s3_client={s3_client_cls}, object_size={object_size}, policy={placement_policy})"
+    )
+    def test_patch_without_content_range(self, s3_http_client: S3HttpClient, bucket: str, original_object: str):
+        with reporter.step("Try patch object without Content-Range header and get exception"):
+            with pytest.raises(Exception, match="MissingContentRange"):
+                s3_http_client.patch_object(bucket, original_object, b"content", None)
+
+    @allure.title(
+        "[NEGATIVE] Patch cannot be applied without Content-Length header "
+        "(s3_client={s3_client_cls}, object_size={object_size}, policy={placement_policy})"
+    )
+    def test_patch_without_content_length(
+        self,
+        s3_http_client: S3HttpClient,
+        bucket: str,
+        original_object: str,
+        node_under_test: ClusterNode,
+    ):
+        with reporter.step("Generate headers that comply with AWS specification"):
+            data = "content"
+            url = f"{self.cluster.default_s3_gate_endpoint}/{bucket}/{original_object}"
+            host = self.cluster.default_s3_gate_endpoint[8:]
+            headers = {"Host": host, "Url": url, "Content-Range": "bytes 0-6/*"}
+            headers = dict(s3_http_client._create_aws_request("PATCH", url, headers, data).headers)
+            headers.pop("Content-Length", None)
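+            # The signed request is replayed through curl because standard clients
+            # always send Content-Length; "-H 'Content-Length:'" below tells curl
+            # to omit the header it would otherwise add automatically.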
and get exception"): + with pytest.raises(Exception, match="NoSuchBucket"): + s3_http_client.patch_object("fake-bucket", unique_name("object-"), b"content", "bytes 0-6/*") + + @allure.title("[NEGATIVE] Patch cannot be applied to non-existent object (s3_client={s3_client_cls}, policy={placement_policy})") + def test_patch_non_existent_object(self, s3_http_client: S3HttpClient, bucket: str): + with reporter.step("Try patch non-existent object and get exception"): + with pytest.raises(Exception, match="NoSuchKey"): + s3_http_client.patch_object(bucket, "fake-object", b"content", "bytes 0-6/*") + + @allure.title("Patch object in versioned bucket (s3_client={s3_client_cls}, object_size={object_size}, policy={placement_policy})") + @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True) + def test_patch_object_in_versioned_bucket( + self, + s3_client: S3ClientWrapper, + s3_http_client: S3HttpClient, + bucket: str, + object_size: ObjectSize, + ): + patch_ranges = ["0:35", "40:49", "object-100:object", "object:object+231"] + + with reporter.step("Generate original object"): + original_file = generate_file(object_size.value) + original_key = s3_helper.object_key_from_file_path(original_file) + + with reporter.step("Put object"): + version = s3_client.put_object(bucket, original_file, original_key) + expected_versions = {version} + + with reporter.step("Patch versioned object"): + for rng in patch_ranges: + start, end = s3_helper.get_range_relative_to_object(rng, object_size=object_size.value, int_values=True) + content_size = end - start + 1 + content_range = f"bytes {start}-{end}/*" + + with reporter.step(f"Generate payload object of {content_size} bytes"): + content_file = generate_file(content_size) + + with reporter.step(f"Patch object and get new version"): + response = s3_http_client.patch_object(bucket, original_key, content_file, content_range, version_id=version) + version = response["VersionId"] + expected_versions.add(version) + + with reporter.step(f"Get patched part of object and make sure it has changed correctly"): + got_part = s3_client.get_object(bucket, original_key, version_id=version, object_range=(start, end)) + assert get_file_hash(got_part) == get_file_hash(content_file), "Expected content hash did not match actual content hash" + + with reporter.step("Check that all expected versions are in bucket"): + got_versions = { + version.get("VersionId") for version in s3_client.list_objects_versions(bucket) if version.get("Key") == original_key + } + assert expected_versions == got_versions, f"Expected versions of object are missing from bucket: {expected_versions}" + + @allure.title("Patch multipart object (range={patch_range}, s3_client={s3_client_cls}, policy={placement_policy})") + @pytest.mark.parametrize("patch_range", ["0:part-1", "part:part*2-1", "part-100:part*2+200", "object-part-1:object"]) + def test_s3_patch_multipart_object( + self, + s3_client: S3ClientWrapper, + s3_http_client: S3HttpClient, + bucket: str, + patch_range: str, + ): + parts_count = 5 + parts = [] + original_size = PART_SIZE_FOR_MULTIPART * parts_count + + with reporter.step("Generate original object and split it into parts"): + original_file = generate_file(original_size) + file_parts = split_file(original_file, parts_count) + object_key = s3_helper.object_key_from_file_path(original_file) + + start, end = s3_helper.get_range_relative_to_object( + patch_range, object_size=original_size, part_size=PART_SIZE_FOR_MULTIPART, int_values=True + ) + content_size = end - start 
+
+        with reporter.step("Generate original object"):
+            original_file = generate_file(object_size.value)
+            original_key = s3_helper.object_key_from_file_path(original_file)
+
+        with reporter.step("Put object"):
+            version = s3_client.put_object(bucket, original_file, original_key)
+            expected_versions = {version}
+
+        with reporter.step("Patch versioned object"):
+            for rng in patch_ranges:
+                start, end = s3_helper.get_range_relative_to_object(rng, object_size=object_size.value, int_values=True)
+                content_size = end - start + 1
+                content_range = f"bytes {start}-{end}/*"
+
+                with reporter.step(f"Generate payload object of {content_size} bytes"):
+                    content_file = generate_file(content_size)
+
+                with reporter.step("Patch object and get new version"):
+                    response = s3_http_client.patch_object(bucket, original_key, content_file, content_range, version_id=version)
+                    version = response["VersionId"]
+                    expected_versions.add(version)
+
+                with reporter.step("Get patched part of object and make sure it has changed correctly"):
+                    got_part = s3_client.get_object(bucket, original_key, version_id=version, object_range=(start, end))
+                    assert get_file_hash(got_part) == get_file_hash(content_file), "Expected content hash did not match actual content hash"
+
+        with reporter.step("Check that all expected versions are in bucket"):
+            got_versions = {
+                version.get("VersionId") for version in s3_client.list_objects_versions(bucket) if version.get("Key") == original_key
+            }
+            assert expected_versions == got_versions, f"Expected versions of object are missing from bucket: {expected_versions}"
+
+    @allure.title("Patch multipart object (range={patch_range}, s3_client={s3_client_cls}, policy={placement_policy})")
+    @pytest.mark.parametrize("patch_range", ["0:part-1", "part:part*2-1", "part-100:part*2+200", "object-part-1:object"])
+    def test_s3_patch_multipart_object(
+        self,
+        s3_client: S3ClientWrapper,
+        s3_http_client: S3HttpClient,
+        bucket: str,
+        patch_range: str,
+    ):
+        parts_count = 5
+        parts = []
+        original_size = PART_SIZE_FOR_MULTIPART * parts_count
+
+        with reporter.step("Generate original object and split it into parts"):
+            original_file = generate_file(original_size)
+            file_parts = split_file(original_file, parts_count)
+            object_key = s3_helper.object_key_from_file_path(original_file)
+
+        start, end = s3_helper.get_range_relative_to_object(
+            patch_range, object_size=original_size, part_size=PART_SIZE_FOR_MULTIPART, int_values=True
+        )
+        content_size = end - start + 1
+        content_range = f"bytes {start}-{end}/*"
+
+        with reporter.step("Generate payload object"):
+            content_file = generate_file(content_size)
+
+        with reporter.step("Create multipart and upload parts"):
+            upload_id = s3_client.create_multipart_upload(bucket, object_key)
+            for part_id, file_path in enumerate(file_parts, start=1):
+                etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
+                parts.append((part_id, etag))
+
+        with reporter.step("Check all parts are visible in bucket"):
+            got_parts = s3_client.list_parts(bucket, object_key, upload_id)
+            assert len(got_parts) == len(file_parts), f"Expected {parts_count} parts, got:\n{got_parts}"
+
+        with reporter.step("Complete multipart upload"):
+            s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
+
+        with reporter.step("Patch multipart object"):
+            s3_http_client.patch_object(bucket, object_key, content_file, content_range, timeout=200)
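+            # The increased timeout is a precaution: a range spanning part
+            # boundaries may require the gateway to rewrite several 5 MiB parts.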
+
+        with reporter.step("Get patched part of object and make sure it has changed correctly"):
+            got_part = s3_client.get_object(bucket, object_key, object_range=(start, end))
+            assert get_file_hash(got_part) == get_file_hash(content_file), "Expected content hash did not match actual content hash"
+
+    @allure.title("Patch multipart object in versioned bucket (s3_client={s3_client_cls}, policy={placement_policy})")
+    @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
+    def test_s3_patch_multipart_object_in_versioned_bucket(
+        self,
+        s3_client: S3ClientWrapper,
+        s3_http_client: S3HttpClient,
+        bucket: str,
+    ):
+        parts = []
+        parts_count = 5
+        original_size = PART_SIZE_FOR_MULTIPART * parts_count
+        patch_ranges = ["0:part-1", "part:part*2-1", "part-100:part*2+200", "object-part-1:object"]
+
+        with reporter.step("Generate original object and split it into parts"):
+            original_file = generate_file(original_size)
+            original_key = s3_helper.object_key_from_file_path(original_file)
+            file_parts = split_file(original_file, parts_count)
+
+        with reporter.step("Create multipart and upload parts"):
+            upload_id = s3_client.create_multipart_upload(bucket, original_key)
+            for part_id, file_path in enumerate(file_parts, start=1):
+                etag = s3_client.upload_part(bucket, original_key, upload_id, part_id, file_path)
+                parts.append((part_id, etag))
+
+        with reporter.step("Check all parts are visible in bucket"):
+            got_parts = s3_client.list_parts(bucket, original_key, upload_id)
+            assert len(got_parts) == len(file_parts), f"Expected {parts_count} parts, got:\n{got_parts}"
+
+        with reporter.step("Complete multipart upload"):
+            response = s3_client.complete_multipart_upload(bucket, original_key, upload_id, parts)
+            version = response["VersionId"]
+            expected_versions = {version}
+
+        with reporter.step("Patch versioned multipart object"):
+            for rng in patch_ranges:
+                start, end = s3_helper.get_range_relative_to_object(
+                    rng, object_size=original_size, part_size=PART_SIZE_FOR_MULTIPART, int_values=True
+                )
+                content_size = end - start + 1
+                content_range = f"bytes {start}-{end}/*"
+
+                with reporter.step("Generate payload object"):
+                    content_file = generate_file(content_size)
+
+                with reporter.step("Patch multipart object and get new version"):
+                    response = s3_http_client.patch_object(
+                        bucket, original_key, content_file, content_range, version_id=version, timeout=200
+                    )
+                    version = response["VersionId"]
+                    expected_versions.add(version)
+
+                with reporter.step("Get patched part of object and make sure it has changed correctly"):
+                    got_part = s3_client.get_object(bucket, original_key, version_id=version, object_range=(start, end))
+                    assert get_file_hash(got_part) == get_file_hash(content_file), "Expected content hash did not match actual content hash"
+
+        with reporter.step("Check that all expected versions are in bucket"):
+            got_versions = {
+                version.get("VersionId") for version in s3_client.list_objects_versions(bucket) if version.get("Key") == original_key
+            }
+            assert expected_versions == got_versions, f"Expected versions of object are missing from bucket: {expected_versions}"
+
+    # TODO: Negative scenario for SSE objects is postponed for now.