forked from TrueCloudLab/frostfs-testcases
Compare commits
5 commits
453286d459 ... 639cfd5da2
639cfd5da2
29aca20956
e3f13b6f25
5a0699dc78
55137e7a1e
6 changed files with 1735 additions and 134 deletions
@@ -15,6 +15,7 @@ from frostfs_testlib.resources import optionals
 from frostfs_testlib.resources.common import COMPLEX_OBJECT_CHUNKS_COUNT, COMPLEX_OBJECT_TAIL_SIZE, SIMPLE_OBJECT_SIZE
 from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
 from frostfs_testlib.s3.interfaces import BucketContainerResolver
+from frostfs_testlib.s3.s3_http_client import S3HttpClient
 from frostfs_testlib.shell import LocalShell, Shell
 from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE, FROSTFS_CLI_EXEC
 from frostfs_testlib.steps.cli.object import get_netmap_netinfo
@@ -301,6 +302,20 @@ def s3_client(
     return client


+@allure.title("[Session] Create S3 http client")
+@pytest.fixture(scope="session")
+def s3_http_client(
+    default_user: User, s3_policy: Optional[str], cluster: Cluster, credentials_provider: CredentialsProvider
+) -> S3HttpClient:
+    node = cluster.cluster_nodes[0]
+    credentials_provider.S3.provide(default_user, node, s3_policy)
+    return S3HttpClient(
+        cluster.default_s3_gate_endpoint,
+        default_user.s3_credentials.access_key,
+        default_user.s3_credentials.secret_key,
+    )
+
+
 @pytest.fixture
 def versioning_status(request: pytest.FixtureRequest) -> VersioningStatus:
     if "param" in request.__dict__:
@@ -193,7 +193,7 @@ class TestFailoverNetwork(ClusterTestBase):
         self.tick_epochs(1, alive_node=nodes_without_an_object[0].storage_node, wait_block=2)

         with reporter.step("Get object for target nodes to data interfaces, expect false"):
-            with pytest.raises(RuntimeError, match="can't create API client: can't init SDK client: gRPC dial: context deadline exceeded"):
+            with pytest.raises(RuntimeError, match="can't create API client: can't init SDK client: context deadline exceeded"):
                 get_object(
                     wallet=default_wallet,
                     cid=storage_object.cid,
@@ -217,6 +217,7 @@ class TestFailoverNetwork(ClusterTestBase):

     @pytest.mark.interfaces
     @allure.title("Block INTERNAL interface node")
+    @requires_container(PUBLIC_WITH_POLICY("REP 1 CBF 1", short_name="REP 1 CBF 1"))
     def test_block_internal_interface(
         self,
         cluster_state_controller: ClusterStateController,
pytest_tests/testsuites/object/test_object_api_patch.py (new file, 1036 lines)
File diff suppressed because it is too large
@@ -20,8 +20,9 @@ from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.test_control import wait_for_success
 from frostfs_testlib.utils import datetime_utils
-from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
+from frostfs_testlib.utils.file_utils import TestFile, generate_file, get_file_hash

+from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container
 from ...resources.common import S3_POLICY_FILE_LOCATION
@@ -142,39 +143,36 @@ class TestECReplication(ClusterTestBase):
         assert len(chunks_nodes) == count

     @allure.title("Create container with EC policy (size={object_size})")
-    def test_create_container_with_ec_policy(self, object_size: ObjectSize, rep_count: int, grpc_client: GrpcClientWrapper) -> None:
-        test_file = generate_file(object_size.value)
-
-        with reporter.step("Create container."):
-            cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
-
+    @requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
+    def test_create_container_with_ec_policy(
+        self, container: str, rep_count: int, grpc_client: GrpcClientWrapper, test_file: TestFile
+    ) -> None:
         with reporter.step("Put object in container."):
-            oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
+            oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Check replication chunks."):
-            assert self.check_replication(rep_count, grpc_client, cid, oid)
+            assert self.check_replication(rep_count, grpc_client, container, oid)

     @allure.title("Lose node with chunk data")
     @pytest.mark.failover
+    @requires_container(PUBLIC_WITH_POLICY("EC 3.1"))
     def test_lose_node_with_data_chunk(
         self,
         grpc_client: GrpcClientWrapper,
         simple_object_size: ObjectSize,
         cluster_state_controller: ClusterStateController,
+        container: str,
         disable_policer: None,
     ) -> None:
-        with reporter.step("Create container."):
-            cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 3.1", await_mode=True)
-
         with reporter.step("Put object in container."):
             test_file = generate_file(simple_object_size.value)
-            oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
+            oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Check chunk replication on 4 nodes."):
-            assert self.check_replication(4, grpc_client, cid, oid)
+            assert self.check_replication(4, grpc_client, container, oid)

         with reporter.step("Search node data chunk"):
-            chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, cid, oid=oid)
+            chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid)
             chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunk)

         with reporter.step("Stop node with data chunk."):
@@ -182,33 +180,32 @@ class TestECReplication(ClusterTestBase):

         with reporter.step("Get object"):
             node = list(set(self.cluster.cluster_nodes) - {chunk_node[0]})[0]
-            grpc_client.object.get(cid, oid, node.storage_node.get_rpc_endpoint())
+            grpc_client.object.get(container, oid, node.storage_node.get_rpc_endpoint())

         with reporter.step("Start stopped node, and check replication chunks."):
             cluster_state_controller.start_node_host(chunk_node[0])
-            self.wait_replication(4, grpc_client, cid, oid)
+            self.wait_replication(4, grpc_client, container, oid)

     @allure.title("Lose node with chunk parity")
     @pytest.mark.failover
+    @requires_container(PUBLIC_WITH_POLICY("EC 3.1"))
     def test_lose_node_with_parity_chunk(
         self,
         grpc_client: GrpcClientWrapper,
         simple_object_size: ObjectSize,
         cluster_state_controller: ClusterStateController,
+        container: str,
         disable_policer: None,
     ) -> None:
-        with reporter.step("Create container."):
-            cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 3.1", await_mode=True)
-
         with reporter.step("Put object in container."):
             test_file = generate_file(simple_object_size.value)
-            oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
+            oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Check chunk replication on 4 nodes."):
-            assert self.check_replication(4, grpc_client, cid, oid)
+            assert self.check_replication(4, grpc_client, container, oid)

         with reporter.step("Search node with parity chunk"):
-            chunk = grpc_client.object.chunks.get_parity(self.cluster.default_rpc_endpoint, cid, oid=oid)
+            chunk = grpc_client.object.chunks.get_parity(self.cluster.default_rpc_endpoint, container, oid=oid)
             chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunk)[0]

         with reporter.step("Stop node parity chunk."):
@@ -216,35 +213,34 @@ class TestECReplication(ClusterTestBase):

         with reporter.step("Get object, expect success."):
             node = list(set(self.cluster.cluster_nodes) - {chunk_node})[0]
-            grpc_client.object.get(cid, oid, node.storage_node.get_rpc_endpoint())
+            grpc_client.object.get(container, oid, node.storage_node.get_rpc_endpoint())

         with reporter.step("Start stoped node, and check replication chunks."):
             cluster_state_controller.start_node_host(chunk_node)
-            self.wait_replication(4, grpc_client, cid, oid)
+            self.wait_replication(4, grpc_client, container, oid)

     @allure.title("Lose nodes with chunk data and parity")
     @pytest.mark.failover
+    @requires_container(PUBLIC_WITH_POLICY("EC 3.1"))
     def test_lose_nodes_data_chunk_and_parity(
         self,
         grpc_client: GrpcClientWrapper,
         simple_object_size: ObjectSize,
         cluster_state_controller: ClusterStateController,
+        container: str,
         disable_policer: None,
     ) -> None:
-        with reporter.step("Create container."):
-            cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 3.1", await_mode=True)
-
         with reporter.step("Put object in container."):
             test_file = generate_file(simple_object_size.value)
-            oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
+            oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Check count chunks, expect 4."):
-            assert self.check_replication(4, grpc_client, cid, oid)
+            assert self.check_replication(4, grpc_client, container, oid)

         with reporter.step("Search node data chunk and node parity chunk"):
-            data_chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, cid, oid=oid)
+            data_chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid)
             data_chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk)[0]
-            parity_chunk = grpc_client.object.chunks.get_parity(self.cluster.default_rpc_endpoint, cid, oid=oid)
+            parity_chunk = grpc_client.object.chunks.get_parity(self.cluster.default_rpc_endpoint, container, oid=oid)
             parity_chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, parity_chunk)[0]

         with reporter.step("Stop node with data chunk."):
@@ -252,11 +248,11 @@ class TestECReplication(ClusterTestBase):

         with reporter.step("Get object"):
             node = list(set(self.cluster.cluster_nodes) - {data_chunk_node, parity_chunk_node})[0]
-            grpc_client.object.get(cid, oid, node.storage_node.get_rpc_endpoint())
+            grpc_client.object.get(container, oid, node.storage_node.get_rpc_endpoint())

         with reporter.step("Start stopped host and check chunks."):
             cluster_state_controller.start_node_host(data_chunk_node)
-            self.wait_replication(4, grpc_client, cid, oid)
+            self.wait_replication(4, grpc_client, container, oid)

         with reporter.step("Stop node with parity chunk and one all node."):
             cluster_state_controller.stop_node_host(data_chunk_node, "hard")
@@ -264,35 +260,34 @@ class TestECReplication(ClusterTestBase):

         with reporter.step("Get object, expect error."):
             with pytest.raises(RuntimeError):
-                grpc_client.object.get(cid, oid, node.storage_node.get_rpc_endpoint())
+                grpc_client.object.get(container, oid, node.storage_node.get_rpc_endpoint())

         with reporter.step("Start stopped nodes and check replication chunk."):
             cluster_state_controller.start_stopped_hosts()
-            self.wait_replication(4, grpc_client, cid, oid)
+            self.wait_replication(4, grpc_client, container, oid)

     @allure.title("Policer work with chunk")
     @pytest.mark.failover
+    @requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
     def test_work_policer_with_nodes(
         self,
         simple_object_size: ObjectSize,
         grpc_client: GrpcClientWrapper,
         cluster_state_controller: ClusterStateController,
+        container: str,
         include_excluded_nodes: None,
     ) -> None:
-        with reporter.step("Create container."):
-            cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
-
         with reporter.step("Put object on container."):
             test_file = generate_file(simple_object_size.value)
-            oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
+            oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Check count chunks nodes on 3."):
-            assert self.check_replication(3, grpc_client, cid, oid)
+            assert self.check_replication(3, grpc_client, container, oid)

         with reporter.step("Search node with chunk."):
-            data_chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, cid, oid=oid)
+            data_chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid)
             node_data_chunk = grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk)[0]
-            first_all_chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
+            first_all_chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)

         with reporter.step("Remove chunk node from network map"):
             cluster_state_controller.remove_node_from_netmap([node_data_chunk.storage_node])
@@ -305,10 +300,10 @@ class TestECReplication(ClusterTestBase):
             node = grpc_client.object.chunks.search_node_without_chunks(
                 first_all_chunks, self.cluster, alive_node.storage_node.get_rpc_endpoint()
             )[0]
-            self.wait_replication(3, grpc_client, cid, oid)
+            self.wait_replication(3, grpc_client, container, oid)

         with reporter.step("Get new chunks"):
-            second_all_chunks = grpc_client.object.chunks.get_all(node.storage_node.get_rpc_endpoint(), cid, oid)
+            second_all_chunks = grpc_client.object.chunks.get_all(node.storage_node.get_rpc_endpoint(), container, oid)

         with reporter.step("Check that oid no change."):
             assert [chunk for chunk in second_all_chunks if data_chunk.object_id == chunk.object_id]
@@ -316,11 +311,11 @@ class TestECReplication(ClusterTestBase):
         with reporter.step("Include node in netmap"):
             cluster_state_controller.include_node_to_netmap(node_data_chunk.storage_node, alive_node.storage_node)

-        self.wait_sync_count_chunks_nodes(grpc_client, cid, oid, 3)
+        self.wait_sync_count_chunks_nodes(grpc_client, container, oid, 3)

     @allure.title("EC X.Y combinations (nodes={node_count},policy={ec_policy},size={object_size})")
     def test_create_container_with_difference_count_nodes(
-        self, node_count: int, ec_policy: str, object_size: ObjectSize, grpc_client: GrpcClientWrapper
+        self, frostfs_cli: FrostfsCli, node_count: int, ec_policy: str, object_size: ObjectSize, grpc_client: GrpcClientWrapper
     ) -> None:
         with reporter.step("Create container."):
             expected_chunks = int(ec_policy.split(" ")[1].split(".")[0]) + int(ec_policy.split(" ")[1].split(".")[1])
@@ -328,6 +323,14 @@ class TestECReplication(ClusterTestBase):
                 expected_chunks *= 4
             cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy=ec_policy, await_mode=True)

+        with reporter.step("Apply Ape rule for container"):
+            frostfs_cli.ape_manager.add(
+                self.cluster.default_rpc_endpoint, chain_id="allowAll", target_name=cid, target_type="container", rule="allow Object.* *"
+            )
+
+        with reporter.step("Wait for one block"):
+            self.wait_for_blocks()
+
         with reporter.step("Put object in container."):
             test_file = generate_file(object_size.value)
             oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
@@ -341,64 +344,60 @@ class TestECReplication(ClusterTestBase):
             assert get_file_hash(test_file) == get_file_hash(file_with_node)

     @allure.title("Request PUT with copies_number flag")
-    def test_put_object_with_copies_number(self, grpc_client: GrpcClientWrapper, simple_object_size: ObjectSize) -> None:
-        with reporter.step("Create container."):
-            cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
-
+    @requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
+    def test_put_object_with_copies_number(self, container: str, grpc_client: GrpcClientWrapper, simple_object_size: ObjectSize) -> None:
         with reporter.step("Put object in container with copies number = 1"):
             test_file = generate_file(simple_object_size.value)
-            oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint, copies_number=1)
+            oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint, copies_number=1)

         with reporter.step("Check that count chunks > 1."):
-            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
+            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
             assert len(chunks) > 1

     @allure.title("Request PUT and 1 node off")
     @pytest.mark.failover
+    @requires_container(PUBLIC_WITH_POLICY("EC 3.1"))
     def test_put_object_with_off_cnr_node(
-        self, grpc_client: GrpcClientWrapper, cluster_state_controller: ClusterStateController, simple_object_size: ObjectSize
+        self,
+        container: str,
+        grpc_client: GrpcClientWrapper,
+        cluster_state_controller: ClusterStateController,
+        simple_object_size: ObjectSize,
     ) -> None:
-        with reporter.step("Create container."):
-            cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 3.1", await_mode=True)
-
         with reporter.step("Stop one node in container nodes"):
             cluster_state_controller.stop_node_host(self.cluster.cluster_nodes[1], "hard")

         with reporter.step("Put object in container, expect success for EC container."):
             test_file = generate_file(simple_object_size.value)
-            grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint, copies_number=1)
+            grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint, copies_number=1)

     @allure.title("Request PUT (size={object_size})")
-    def test_put_object_with_ec_cnr(self, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
-        with reporter.step("Create container."):
-            cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
-
+    @requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
+    def test_put_object_with_ec_cnr(self, container: str, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
         with reporter.step("Put object in container"):
             test_file = generate_file(object_size.value)
-            oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
+            oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Get chunks object."):
-            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
+            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)

         with reporter.step("Check header chunks object"):
             for chunk in chunks:
                 chunk_head = grpc_client.object.head(
-                    cid, chunk.object_id, self.cluster.default_rpc_endpoint, is_raw=True, json_output=False
+                    container, chunk.object_id, self.cluster.default_rpc_endpoint, is_raw=True, json_output=False
                 ).stdout
                 assert "EC header:" in chunk_head

     @allure.title("Request GET (size={object_size})")
-    def test_get_object_in_ec_cnr(self, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
-        with reporter.step("Create container."):
-            cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1 CBF 1", await_mode=True)
-
+    @requires_container(PUBLIC_WITH_POLICY("EC 2.1 CBF 1"))
+    def test_get_object_in_ec_cnr(self, container: str, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
         with reporter.step("Put object in container"):
             test_file = generate_file(object_size.value)
             hash_origin_file = get_file_hash(test_file)
-            oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
+            oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Get id all chunks."):
-            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
+            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)

         with reporter.step("Search chunk node and not chunks node."):
             chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunks[0])[0]
@@ -407,88 +406,80 @@ class TestECReplication(ClusterTestBase):
             ]

         with reporter.step("GET request with chunk node, expect success"):
-            file_one = grpc_client.object.get(cid, oid, chunk_node.storage_node.get_rpc_endpoint())
+            file_one = grpc_client.object.get(container, oid, chunk_node.storage_node.get_rpc_endpoint())
             hash_file_one = get_file_hash(file_one)
             assert hash_file_one == hash_origin_file

         with reporter.step("Get request with not chunk node"):
-            file_two = grpc_client.object.get(cid, oid, not_chunk_node.storage_node.get_rpc_endpoint())
+            file_two = grpc_client.object.get(container, oid, not_chunk_node.storage_node.get_rpc_endpoint())
             hash_file_two = get_file_hash(file_two)
             assert hash_file_two == hash_file_one == hash_origin_file

     @allure.title("Request SEARCH with flags 'root' (size={object_size})")
-    def test_search_object_in_ec_cnr_root_flags(self, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
-        with reporter.step("Create container."):
-            cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
-
+    @requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
+    def test_search_object_in_ec_cnr_root_flags(self, container: str, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
         with reporter.step("Put object in container"):
             test_file = generate_file(object_size.value)
-            oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
+            oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Search operation with --root flags"):
-            search_output = grpc_client.object.search(cid, self.cluster.default_rpc_endpoint, root=True)
+            search_output = grpc_client.object.search(container, self.cluster.default_rpc_endpoint, root=True)
             assert search_output[0] == oid

     @allure.title("Request SEARCH check valid chunk id (size={object_size})")
-    def test_search_object_in_ec_cnr_chunk_id(self, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
-        with reporter.step("Create container."):
-            cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
-
+    @requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
+    def test_search_object_in_ec_cnr_chunk_id(self, container: str, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
         with reporter.step("Put object in container"):
             test_file = generate_file(object_size.value)
-            oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
+            oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Search operation object"):
-            search_output = grpc_client.object.search(cid, self.cluster.default_rpc_endpoint)
-            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
+            search_output = grpc_client.object.search(container, self.cluster.default_rpc_endpoint)
+            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
             for chunk in chunks:
                 assert chunk.object_id in search_output

     @allure.title("Request SEARCH check no chunk index info (size={object_size})")
-    def test_search_object_in_ec_cnr(self, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
-        with reporter.step("Create container."):
-            cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
-
+    @requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
+    def test_search_object_in_ec_cnr(self, container: str, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
         with reporter.step("Put object in container"):
             test_file = generate_file(object_size.value)
-            oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
+            oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Search operation all chunk"):
-            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
+            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
             for chunk in chunks:
-                chunk_search = grpc_client.object.search(cid, self.cluster.default_rpc_endpoint, oid=chunk.object_id)
+                chunk_search = grpc_client.object.search(container, self.cluster.default_rpc_endpoint, oid=chunk.object_id)
                 assert "index" not in chunk_search

     @allure.title("Request DELETE (size={object_size})")
     @pytest.mark.failover
+    @requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
     def test_delete_object_in_ec_cnr(
-        self, grpc_client: GrpcClientWrapper, object_size: ObjectSize, cluster_state_controller: ClusterStateController
+        self, container: str, grpc_client: GrpcClientWrapper, object_size: ObjectSize, cluster_state_controller: ClusterStateController
     ) -> None:
-        with reporter.step("Create container."):
-            cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
-
         with reporter.step("Put object in container."):
             test_file = generate_file(object_size.value)
-            oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
+            oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Check object chunks nodes."):
-            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
+            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
             replication_count = 3 if object_size.name == "simple" else 3 * 4
             assert len(chunks) == replication_count

         with reporter.step("Delete object"):
-            grpc_client.object.delete(cid, oid, self.cluster.default_rpc_endpoint)
+            grpc_client.object.delete(container, oid, self.cluster.default_rpc_endpoint)

         with reporter.step("Check that delete all chunks."):
             for chunk in chunks:
                 with pytest.raises(RuntimeError, match="object already removed"):
-                    grpc_client.object.head(cid, chunk.object_id, self.cluster.default_rpc_endpoint)
+                    grpc_client.object.head(container, chunk.object_id, self.cluster.default_rpc_endpoint)

         with reporter.step("Put second object."):
-            oid_second = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
+            oid_second = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Check second object chunks nodes."):
-            chunks_second_object = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid_second)
+            chunks_second_object = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid_second)
             assert len(chunks_second_object) == replication_count

         with reporter.step("Stop nodes with chunk."):
@@ -497,48 +488,47 @@ class TestECReplication(ClusterTestBase):

         with reporter.step("Delete second object"):
             cluster_nodes = list(set(self.cluster.cluster_nodes) - {chunk_node[0]})
-            grpc_client.object.delete(cid, oid_second, cluster_nodes[0].storage_node.get_rpc_endpoint())
+            grpc_client.object.delete(container, oid_second, cluster_nodes[0].storage_node.get_rpc_endpoint())

         with reporter.step("Check that delete all chunk second object."):
             for chunk in chunks_second_object:
                 with pytest.raises(RuntimeError, match="object already removed|object not found"):
-                    grpc_client.object.head(cid, chunk.object_id, cluster_nodes[0].storage_node.get_rpc_endpoint())
+                    grpc_client.object.head(container, chunk.object_id, cluster_nodes[0].storage_node.get_rpc_endpoint())

     @allure.title("Request LOCK (size={object_size})")
     @pytest.mark.failover
+    @requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
     def test_lock_object_in_ec_cnr(
         self,
+        container: str,
+        test_file: TestFile,
         grpc_client: GrpcClientWrapper,
         frostfs_cli: FrostfsCli,
         object_size: ObjectSize,
         cluster_state_controller: ClusterStateController,
         include_excluded_nodes: None,
     ) -> None:
-        with reporter.step("Create container."):
-            cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
-
         with reporter.step("Put object in container."):
-            test_file = generate_file(object_size.value)
-            oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
+            oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Check object chunks nodes."):
-            chunks_object = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
+            chunks_object = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
             replication_count = 3 if object_size.name == "simple" else 3 * 4
             assert len(chunks_object) == replication_count

         with reporter.step("Put LOCK in object."):
             # TODO Rework for the grpc_client when the netmap methods are implemented
             epoch = frostfs_cli.netmap.epoch(self.cluster.default_rpc_endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout.strip()
-            grpc_client.object.lock(cid, oid, self.cluster.default_rpc_endpoint, expire_at=(int(epoch) + 5))
+            grpc_client.object.lock(container, oid, self.cluster.default_rpc_endpoint, expire_at=(int(epoch) + 5))

         with reporter.step("Check don`t delete chunk"):
             for chunk in chunks_object:
                 with pytest.raises(RuntimeError, match="Lock EC chunk failed"):
-                    grpc_client.object.delete(cid, chunk.object_id, self.cluster.default_rpc_endpoint)
+                    grpc_client.object.delete(container, chunk.object_id, self.cluster.default_rpc_endpoint)

         with reporter.step("Check enable LOCK object"):
             with pytest.raises(RuntimeError, match="object is locked"):
-                grpc_client.object.delete(cid, oid, self.cluster.default_rpc_endpoint)
+                grpc_client.object.delete(container, oid, self.cluster.default_rpc_endpoint)

         with reporter.step("Remove node in netmap."):
             chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunks_object[0])[0]
@@ -548,11 +538,11 @@ class TestECReplication(ClusterTestBase):
         with reporter.step("Check don`t delete chunk."):
             for chunk in chunks_object:
                 with pytest.raises(RuntimeError, match="Lock EC chunk failed|object not found"):
-                    grpc_client.object.delete(cid, chunk.object_id, alive_node.storage_node.get_rpc_endpoint())
+                    grpc_client.object.delete(container, chunk.object_id, alive_node.storage_node.get_rpc_endpoint())

         with reporter.step("Check enable LOCK object"):
             with pytest.raises(RuntimeError, match="object is locked"):
-                grpc_client.object.delete(cid, oid, alive_node.storage_node.get_rpc_endpoint())
+                grpc_client.object.delete(container, oid, alive_node.storage_node.get_rpc_endpoint())

         with reporter.step("Include node in netmap"):
             cluster_state_controller.include_node_to_netmap(chunk_node.storage_node, alive_node.storage_node)
@@ -639,8 +629,10 @@ class TestECReplication(ClusterTestBase):

     @allure.title("Evacuation shard with chunk (type={type})")
     @pytest.mark.parametrize("type, get_chunk", [("data", get_data_chunk_object), ("parity", get_parity_chunk_object)])
+    @requires_container(PUBLIC_WITH_POLICY("EC 1.1 CBF 1"))
     def test_evacuation_data_shard(
         self,
+        container: str,
         restore_nodes_shards_mode: None,
         frostfs_cli: FrostfsCli,
         grpc_client: GrpcClientWrapper,
@@ -648,15 +640,12 @@ class TestECReplication(ClusterTestBase):
         type: str,
         get_chunk,
     ) -> None:
-        with reporter.step("Create container."):
-            cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 1.1 CBF 1", await_mode=True)
-
         with reporter.step("Put object in container."):
             test_file = generate_file(max_object_size - 1000)
-            oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
+            oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Get object chunks."):
-            chunk = get_chunk(self, frostfs_cli, cid, oid, self.cluster.default_rpc_endpoint)
+            chunk = get_chunk(self, frostfs_cli, container, oid, self.cluster.default_rpc_endpoint)
             chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunk)
             frostfs_node_cli = self.get_node_cli(chunk_node[0], config=chunk_node[0].storage_node.get_remote_wallet_config_path())
@@ -669,7 +658,7 @@ class TestECReplication(ClusterTestBase):
             frostfs_node_cli.shards.evacuation_start(chunk_node[0].storage_node.get_control_endpoint(), shard_id, await_mode=True)

         with reporter.step("Get object after evacuation shard"):
-            grpc_client.object.get(cid, oid, self.cluster.default_rpc_endpoint)
+            grpc_client.object.get(container, oid, self.cluster.default_rpc_endpoint)

     @allure.title("[NEGATIVE] Don`t create more 1 EC policy")
     def test_more_one_ec_policy(self, grpc_client: GrpcClientWrapper) -> None:
@@ -698,6 +687,7 @@ class TestECReplication(ClusterTestBase):
     @pytest.mark.parametrize("s3_policy, s3_client", [(S3_POLICY_FILE_LOCATION, AwsCliClient)], indirect=True)
     def test_count_chunks_bucket_with_ec_location(
         self,
+        test_file: TestFile,
        s3_client: S3ClientWrapper,
         bucket_container_resolver: BucketContainerResolver,
         grpc_client: GrpcClientWrapper,
@@ -712,7 +702,6 @@ class TestECReplication(ClusterTestBase):
             assert bucket_status == VersioningStatus.ENABLED.value

         with reporter.step("Put object in bucket"):
-            test_file = generate_file(object_size.value)
             bucket_object = s3_client.put_object(bucket, test_file)

         with reporter.step("Watch replication count chunks"):
@@ -722,23 +711,20 @@ class TestECReplication(ClusterTestBase):
             assert len(chunks) == expect_chunks

     @allure.title("Replication chunk after drop (size={object_size})")
-    def test_drop_chunk_and_replication(self, grpc_client: GrpcClientWrapper, object_size: ObjectSize, rep_count: int) -> None:
-        with reporter.step("Create container"):
-            cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1 CBF 1", await_mode=True)
-
+    @requires_container(PUBLIC_WITH_POLICY("EC 2.1 CBF 1"))
+    def test_drop_chunk_and_replication(self, test_file: TestFile, container: str, grpc_client: GrpcClientWrapper, rep_count: int) -> None:
         with reporter.step("Put object"):
-            test_file = generate_file(object_size.value)
-            oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
+            oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Get all chunks"):
-            data_chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, cid, oid=oid)
+            data_chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid)

         with reporter.step("Search chunk node"):
             chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk)
             shell_chunk_node = chunk_node[0].host.get_shell()

         with reporter.step("Get replication count"):
-            assert self.check_replication(rep_count, grpc_client, cid, oid)
+            assert self.check_replication(rep_count, grpc_client, container, oid)

         with reporter.step("Delete chunk"):
             frostfs_node_cli = FrostfsCli(
@@ -746,7 +732,7 @@ class TestECReplication(ClusterTestBase):
             frostfs_cli_exec_path=FROSTFS_CLI_EXEC,
             config_file=chunk_node[0].storage_node.get_remote_wallet_config_path(),
         )
-        frostfs_node_cli.control.drop_objects(chunk_node[0].storage_node.get_control_endpoint(), f"{cid}/{data_chunk.object_id}")
+        frostfs_node_cli.control.drop_objects(chunk_node[0].storage_node.get_control_endpoint(), f"{container}/{data_chunk.object_id}")

         with reporter.step("Wait replication count after drop one chunk"):
-            self.wait_replication(rep_count, grpc_client, cid, oid)
+            self.wait_replication(rep_count, grpc_client, container, oid)
pytest_tests/testsuites/services/s3_gate/test_s3_http_object.py (new file, 561 lines)
@@ -0,0 +1,561 @@
+import random
+import time
+from datetime import datetime
+from email.utils import formatdate
+
+import allure
+import pytest
+from frostfs_testlib import reporter
+from frostfs_testlib.cli.generic_cli import GenericCli
+from frostfs_testlib.credentials.interfaces import User
+from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus
+from frostfs_testlib.s3.s3_http_client import S3HttpClient
+from frostfs_testlib.shell.interfaces import CommandOptions
+from frostfs_testlib.shell.local_shell import LocalShell
+from frostfs_testlib.steps.s3 import s3_helper
+from frostfs_testlib.storage.cluster import ClusterNode
+from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
+from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
+from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
+from frostfs_testlib.utils.file_utils import generate_file, get_file_hash, split_file
+from frostfs_testlib.utils.string_utils import unique_name
+
+from ....resources.common import S3_POLICY_FILE_LOCATION
+
+FIVE_GIGABYTES = 5_368_709_120
+PART_SIZE_FOR_MULTIPART = 5 * 1024 * 1024
+
+
+def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
+    if "s3_client" not in metafunc.fixturenames:
+        return
+    metafunc.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], ids=["s3policy"], indirect=True)
+
+
+@pytest.fixture(scope="session", params=[pytest.param("rep3", marks=pytest.mark.rep), pytest.param("ec3.1", marks=pytest.mark.ec)])
+def placement_policy(request: pytest.FixtureRequest) -> PlacementPolicy:
+    if request.param == "ec3.1":
+        return PlacementPolicy("ec3.1", "ec3.1")
+    return PlacementPolicy("rep3", "rep3")
+
+
+@pytest.fixture(scope="session")
+def versioning_status(request: pytest.FixtureRequest) -> VersioningStatus:
+    if "param" in request.__dict__:
+        return request.param
+    return VersioningStatus.UNDEFINED
+
+
+@pytest.mark.nightly
+@pytest.mark.s3_gate
+class TestS3GateHttpObject(ClusterTestBase):
+    @allure.title("[Class] Create bucket")
+    @pytest.fixture(scope="class")
+    def bucket(self, s3_client: S3ClientWrapper, versioning_status: VersioningStatus, placement_policy: PlacementPolicy) -> str:
+        with reporter.step(f"Create bucket with location constraint {placement_policy.value}"):
+            bucket = s3_client.create_bucket(location_constraint=placement_policy.value)
+
+        s3_helper.set_bucket_versioning(s3_client, bucket, versioning_status)
+
+        with reporter.step("Allow patch for bucket"):
+            s3_client.put_bucket_policy(
+                bucket,
+                policy={
+                    "Version": "2012-10-17",
+                    "Id": "aaaa-bbbb-cccc-dddd",
+                    "Statement": [
+                        {
+                            "Sid": "AddPerm",
+                            "Effect": "Allow",
+                            "Principal": "*",
+                            "Action": ["s3:PatchObject"],
+                            "Resource": [f"arn:aws:s3:::{bucket}/*"],
+                        },
+                    ],
+                },
+            )
+
+        return bucket
+
+    @pytest.fixture(scope="function")
+    def original_object(self, s3_client: S3ClientWrapper, bucket: str, object_size: ObjectSize) -> str:
+        with reporter.step("Generate test object"):
+            file = generate_file(object_size.value)
+            key = s3_helper.object_key_from_file_path(file)
+
+        with reporter.step("Put object"):
+            s3_client.put_object(bucket, file, key)
+
+        return key
+
+    @allure.title("Patch simple object payload (range={patch_range}, s3_client={s3_client}, policy={placement_policy})")
+    @pytest.mark.parametrize("object_size", ["simple"], indirect=True)
+    @pytest.mark.parametrize(
+        "patch_range",
+        # String "object" denotes size of object.
+        ["0:19", "500:550", "object/2-100:object/2+200", "object-1:object", "object:object", "object:object+123"],
+    )
+    def test_patch_simple_object_payload(
+        self,
+        s3_client: S3ClientWrapper,
+        s3_http_client: S3HttpClient,
+        bucket: str,
+        original_object: str,
+        object_size: ObjectSize,
+        patch_range: str,
+    ):
+        start, end = s3_helper.get_range_relative_to_object(patch_range, object_size.value, int_values=True)
+        content_size = end - start + 1
+        content_range = f"bytes {start}-{end}/*"
+
+        with reporter.step("Generate payload object"):
+            content_file = generate_file(content_size)
+
+        with reporter.step("Patch simple object"):
+            s3_http_client.patch_object(bucket, original_object, content_file, content_range)
+
+        with reporter.step("Get patched part of object and make sure it has changed correctly"):
+            patched_file_part = s3_client.get_object(bucket, original_object, object_range=(start, end))
+            assert get_file_hash(patched_file_part) == get_file_hash(
+                content_file
+            ), "Expected content hash did not match actual content hash"
+
+    @allure.title("Patch complex object payload (range={patch_range}, s3_client={s3_client}, policy={placement_policy})")
+    @pytest.mark.parametrize("object_size", ["complex"], indirect=True)
+    @pytest.mark.parametrize(
+        "patch_range",
+        # Strings "object" and "part" denote size of object and its part, respectively.
+        ["part:part+100", "object-part:object", "0:part", "part*2:part*3", "part-1:part*2", "part+1:part*2-1"],
+    )
+    def test_patch_complex_object_payload(
+        self,
+        s3_client: S3ClientWrapper,
+        s3_http_client: S3HttpClient,
+        bucket: str,
+        original_object: str,
+        object_size: ObjectSize,
+        max_object_size: int,
+        patch_range: str,
+    ):
+        start, end = s3_helper.get_range_relative_to_object(patch_range, object_size.value, max_object_size, int_values=True)
+        content_size = end - start + 1
+        content_range = f"bytes {start}-{end}/*"
+
+        with reporter.step("Generate payload object"):
+            content_file = generate_file(content_size)
+
+        with reporter.step("Patch complex object"):
+            s3_http_client.patch_object(bucket, original_object, content_file, content_range)
+
+        with reporter.step("Get patched part of object and make sure it has changed correctly"):
+            patched_file_part = s3_client.get_object(bucket, original_object, object_range=(start, end))
+            assert get_file_hash(patched_file_part) == get_file_hash(
+                content_file
+            ), "Expected content hash did not match actual content hash"
+
+    @allure.title(
+        "Patch object with fulfilled If-Match condition (s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
+    )
+    def test_patch_with_fulfilled_if_match_contidion(
+        self,
+        s3_client: S3ClientWrapper,
+        s3_http_client: S3HttpClient,
+        bucket: str,
+        original_object: str,
+    ):
+        start, end = 100, 199
+        content_size = end - start + 1
+        content_range = f"bytes {start}-{end}/*"
+
+        with reporter.step("Generate payload object"):
+            content_file = generate_file(content_size)
+            expected_hash = get_file_hash(content_file)
+
+        with reporter.step("Get object ETag attribute"):
+            object_info = s3_client.head_object(bucket, original_object)
+            etag = object_info["ETag"]
+
+        with reporter.step("Patch object with If-Match header"):
+            s3_http_client.patch_object(bucket, original_object, content_file, content_range, if_match=etag)
+
+        with reporter.step("Get patched object and make sure it has changed correctly"):
+            patched_file = s3_client.get_object(bucket, original_object)
+            patched_hash = get_file_hash(patched_file, offset=start, len=content_size)
+            assert patched_hash == expected_hash, "Expected content hash did not match actual content hash"
+
+    @allure.title(
+        "[NEGATIVE] Patch cannot be applied with failed If-Match condition "
+        "(s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
+    )
+    def test_patch_with_failed_if_match_condition(self, s3_http_client: S3HttpClient, bucket: str, original_object: str):
+        with reporter.step("Try patch object with If-Match header and get exception"):
+            with pytest.raises(Exception, match="PreconditionFailed"):
+                s3_http_client.patch_object(bucket, original_object, "content", "bytes 0-6/*", if_match="nonexistentetag")
+
+    @allure.title(
+        "Patch object with fulfilled If-Unmodified-Since condition "
+        "(s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
+    )
+    def test_patch_with_fulfilled_if_unmodified_since_condition(
+        self,
+        s3_client: S3ClientWrapper,
+        s3_http_client: S3HttpClient,
+        bucket: str,
+        original_object: str,
+    ):
+        start, end = 235, 341
+        content_size = end - start + 1
+        content_range = f"bytes {start}-{end}/*"
+
+        with reporter.step("Generate payload object"):
+            content_file = generate_file(content_size)
+            expected_hash = get_file_hash(content_file)
+
+        with reporter.step("Get object LastModified attribute"):
+            response = s3_client.head_object(bucket, original_object)
+            if isinstance(response["LastModified"], str):
+                response["LastModified"] = datetime.fromisoformat(response["LastModified"])
+
+            # Convert datetime to RFC 7232 format
+            last_modified = formatdate(response["LastModified"].timestamp(), localtime=False, usegmt=True)
+
+        with reporter.step("Patch object with If-Unmodified-Since header"):
+            s3_http_client.patch_object(bucket, original_object, content_file, content_range, if_unmodified_since=last_modified)
+
+        with reporter.step("Get patched object and make sure it has changed correctly"):
+            patched_file = s3_client.get_object(bucket, original_object)
+            patched_hash = get_file_hash(patched_file, offset=start, len=content_size)
+            assert patched_hash == expected_hash, "Expected content hash did not match actual content hash"
+
+    @allure.title(
+        "[NEGATIVE] Patch cannot be applied with failed If-Unmodified-Since condition "
+        "(s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
+    )
+    def test_patch_with_failed_if_unmodified_since_condition(
+        self,
+        s3_client: S3ClientWrapper,
+        s3_http_client: S3HttpClient,
+        bucket: str,
+        original_object: str,
+        object_size: ObjectSize,
+    ):
+        with reporter.step("Generate new object to update LastModified attribute of original object"):
+            new_object_file = generate_file(object_size.value)
+
+        with reporter.step("Get original object LastModified attribute"):
+            response = s3_client.head_object(bucket, original_object)
+            if isinstance(response["LastModified"], str):
+                response["LastModified"] = datetime.fromisoformat(response["LastModified"])
+
+            # Convert datetime to RFC 7232 format
+            previous_last_modified = formatdate(response["LastModified"].timestamp(), localtime=False, usegmt=True)
+
+        # Wait to provide new LastModified
+        time.sleep(2)
+
+        with reporter.step("Put new data for existing object"):
+            s3_client.put_object(bucket, new_object_file, original_object)
+
+        with reporter.step("Get object LastModified attribute with new data and make sure it has changed"):
+            response = s3_client.head_object(bucket, original_object)
+            if isinstance(response["LastModified"], str):
+                response["LastModified"] = datetime.fromisoformat(response["LastModified"])
+
+            # Convert datetime to RFC 7232 format
+            last_modified = formatdate(response["LastModified"].timestamp(), localtime=False, usegmt=True)
+            assert last_modified != previous_last_modified, f"Attribute LastModified was expected to change: {last_modified}"
+
+        with reporter.step("Try patch object with If-Unmodified-Since header and get exception"):
+            with pytest.raises(Exception, match="PreconditionFailed"):
+                s3_http_client.patch_object(bucket, original_object, b"modify", "bytes 0-5/*", if_unmodified_since=previous_last_modified)
@allure.title(
|
||||
"Patch object with fulfilled x-amz-expected-bucket-owner condition "
|
||||
"(s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
|
||||
)
|
||||
def test_patch_with_fulfilled_if_expected_bucket_owner_condition(
|
||||
self,
|
||||
s3_client: S3ClientWrapper,
|
||||
s3_http_client: S3HttpClient,
|
||||
bucket: str,
|
||||
original_object: str,
|
||||
default_user: User,
|
||||
):
|
||||
start, end = 512, 749
|
||||
content_size = end - start + 1
|
||||
content_range = f"bytes {start}-{end}/*"
|
||||
|
||||
with reporter.step("Generate payload object"):
|
||||
content_file = generate_file(content_size)
|
||||
expected_hash = get_file_hash(content_file)
|
||||
# First 4 chars are always AIDA, which are not part of real ID
|
||||
expected_bucket_owner = default_user.attributes["id"][4:]
|
||||
|
||||
with reporter.step("Patch object with x-amz-expected-bucket-owner header"):
|
||||
s3_http_client.patch_object(
|
||||
bucket,
|
||||
original_object,
|
||||
content_file,
|
||||
content_range,
|
||||
x_amz_expected_bucket_owner=expected_bucket_owner,
|
||||
)
|
||||
|
||||
with reporter.step("Get patched object and make sure it has changed correctly"):
|
||||
patched_file = s3_client.get_object(bucket, original_object)
|
||||
patched_hash = get_file_hash(patched_file, offset=start, len=content_size)
|
||||
assert patched_hash == expected_hash, "Expected content hash did not match actual content hash"
|
||||
|
||||
@allure.title(
|
||||
"[NEGATIVE] Patch cannot be applied with failed x-amz-expected-bucket-owner condition "
|
||||
"(s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
|
||||
)
|
||||
def test_patch_with_failed_if_expected_bucket_owner_condition(
|
||||
self,
|
||||
s3_http_client: S3HttpClient,
|
||||
bucket: str,
|
||||
original_object: str,
|
||||
default_user: User,
|
||||
):
|
||||
# First 4 chars are always AIDA, which are not part of real ID
|
||||
unexpected_bucket_owner = list(default_user.attributes["id"][4:])
|
||||
random.shuffle(unexpected_bucket_owner)
|
||||
unexpected_bucket_owner = "".join(unexpected_bucket_owner)
|
||||
|
||||
with reporter.step("Try patch object with x-amz-expected-bucket-owner header and get exception"):
|
||||
with pytest.raises(Exception, match="AccessDenied"):
|
||||
s3_http_client.patch_object(
|
||||
bucket,
|
||||
original_object,
|
||||
b"blablabla",
|
||||
"bytes 10-18/*",
|
||||
x_amz_expected_bucket_owner=unexpected_bucket_owner,
|
||||
)
|
||||
|
||||
@allure.title(
|
||||
"[NEGATIVE] Patch cannot be applied with invalid Content-Range header "
|
||||
"(range={patch_range}, s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
|
||||
)
|
||||
@pytest.mark.parametrize(
|
||||
"patch_range",
|
||||
# String "object" denotes size of object.
|
||||
["object+100:200", "object+10:object+16", "-1:1", "20:100", "0:2", f"0:{FIVE_GIGABYTES}", "0:0"],
|
||||
)
|
||||
def test_patch_with_invalid_content_range(
|
||||
self,
|
||||
s3_http_client: S3HttpClient,
|
||||
bucket: str,
|
||||
original_object: str,
|
||||
object_size: ObjectSize,
|
||||
patch_range: str,
|
||||
):
|
||||
content_range = s3_helper.get_range_relative_to_object(patch_range, object_size.value)
|
||||
with reporter.step("Try patch object with invalid Content-Range header and get exception"):
|
||||
with pytest.raises(Exception, match="InvalidRange"):
|
||||
s3_http_client.patch_object(bucket, original_object, b"content", content_range)
|
||||
|
||||
@allure.title(
|
||||
"[NEGATIVE] Patch cannot be applied without Content-Range header "
|
||||
"(s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
|
||||
)
|
||||
def test_patch_without_content_range(self, s3_http_client: S3HttpClient, bucket: str, original_object: str):
|
||||
with reporter.step("Try patch object without Content-Range header and get exception"):
|
||||
with pytest.raises(Exception, match="MissingContentRange"):
|
||||
s3_http_client.patch_object(bucket, original_object, b"content", None)

    @allure.title(
        "[NEGATIVE] Patch cannot be applied without Content-Length header "
        "(s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
    )
    def test_patch_without_content_length(
        self,
        s3_http_client: S3HttpClient,
        bucket: str,
        original_object: str,
        node_under_test: ClusterNode,
    ):
        with reporter.step("Generate headers that comply with AWS specification"):
            data = "content"
            url = f"{self.cluster.default_s3_gate_endpoint}/{bucket}/{original_object}"
            # Strip the "https://" scheme prefix (8 chars) to get the bare host for the Host header
            host = self.cluster.default_s3_gate_endpoint[8:]
            headers = {"Host": host, "Url": url, "Content-Range": "bytes 0-6/*"}
            headers = dict(s3_http_client._create_aws_request("PATCH", url, headers, data).headers)
            headers.pop("Content-Length", None)

        with reporter.step("Try patch object without Content-Length header and get exception"):
            curl = GenericCli("curl", node_under_test.host)
            request = f" {url} -X PATCH"

            for header, value in headers.items():
                request += f" -H '{header}: {value}'"

            # Passing a header with an empty value makes curl drop the Content-Length
            # header it would otherwise generate for the request body
            request += " -H 'Content-Length:'"
            request += f" -d '{data}' -k"

            response = curl(request, shell=LocalShell(), options=CommandOptions(check=False))
            assert "MissingContentLength" in response.stdout, response.stdout

    @allure.title("[NEGATIVE] Patch cannot be applied to non-existent bucket (policy={placement_policy})")
    def test_patch_non_existent_bucket(self, s3_http_client: S3HttpClient):
        with reporter.step("Try patch object in non-existent bucket and get exception"):
            with pytest.raises(Exception, match="NoSuchBucket"):
                s3_http_client.patch_object("fake-bucket", unique_name("object-"), b"content", "bytes 0-6/*")

    @allure.title("[NEGATIVE] Patch cannot be applied to non-existent object (s3_client={s3_client}, policy={placement_policy})")
    def test_patch_non_existent_object(self, s3_http_client: S3HttpClient, bucket: str):
        with reporter.step("Try patch non-existent object and get exception"):
            with pytest.raises(Exception, match="NoSuchKey"):
                s3_http_client.patch_object(bucket, "fake-object", b"content", "bytes 0-6/*")

    @allure.title("Patch object in versioned bucket (s3_client={s3_client}, object_size={object_size}, policy={placement_policy})")
    @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
    def test_patch_object_in_versioned_bucket(
        self,
        s3_client: S3ClientWrapper,
        s3_http_client: S3HttpClient,
        bucket: str,
        object_size: ObjectSize,
    ):
        patch_ranges = ["0:35", "40:49", "object-100:object", "object:object+231"]

        with reporter.step("Generate original object"):
            original_file = generate_file(object_size.value)
            original_key = s3_helper.object_key_from_file_path(original_file)

        with reporter.step("Put object"):
            version = s3_client.put_object(bucket, original_file, original_key)
            expected_versions = {version}

        with reporter.step("Patch versioned object"):
            for rng in patch_ranges:
                start, end = s3_helper.get_range_relative_to_object(rng, object_size=object_size.value, int_values=True)
                content_size = end - start + 1
                content_range = f"bytes {start}-{end}/*"

                with reporter.step(f"Generate payload object of {content_size} bytes"):
                    content_file = generate_file(content_size)

                with reporter.step("Patch object and get new version"):
                    response = s3_http_client.patch_object(bucket, original_key, content_file, content_range, version_id=version)
                    version = response["VersionId"]
                    expected_versions.add(version)

                with reporter.step("Get patched part of object and make sure it has changed correctly"):
                    got_part = s3_client.get_object(bucket, original_key, version_id=version, object_range=(start, end))
                    assert get_file_hash(got_part) == get_file_hash(content_file), "Expected content hash did not match actual content hash"

        with reporter.step("Check that all expected versions are in bucket"):
            got_versions = {
                version.get("VersionId") for version in s3_client.list_objects_versions(bucket) if version.get("Key") == original_key
            }
            assert expected_versions == got_versions, f"Expected versions of object are missing from bucket: {expected_versions}"
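    # Note: each PATCH above targets the version produced by the previous call and
    # returns a fresh VersionId, so one put plus four patches leaves five versions,
    # which is exactly what the final set comparison verifies.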

    @allure.title("Patch multipart object (range={patch_range}, s3_client={s3_client}, policy={placement_policy})")
    @pytest.mark.parametrize("patch_range", ["0:part-1", "part:part*2-1", "part-100:part*2+200", "object-part-1:object"])
    def test_s3_patch_multipart_object(
        self,
        s3_client: S3ClientWrapper,
        s3_http_client: S3HttpClient,
        bucket: str,
        patch_range: str,
    ):
        parts_count = 5
        parts = []
        original_size = PART_SIZE_FOR_MULTIPART * parts_count

        with reporter.step("Generate original object and split it into parts"):
            original_file = generate_file(original_size)
            file_parts = split_file(original_file, parts_count)
            object_key = s3_helper.object_key_from_file_path(original_file)

        start, end = s3_helper.get_range_relative_to_object(
            patch_range, object_size=original_size, part_size=PART_SIZE_FOR_MULTIPART, int_values=True
        )
        content_size = end - start + 1
        content_range = f"bytes {start}-{end}/*"

        with reporter.step("Generate payload object"):
            content_file = generate_file(content_size)

        with reporter.step("Create multipart and upload parts"):
            upload_id = s3_client.create_multipart_upload(bucket, object_key)
            for part_id, file_path in enumerate(file_parts, start=1):
                etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
                parts.append((part_id, etag))

        with reporter.step("Check all parts are visible in bucket"):
            got_parts = s3_client.list_parts(bucket, object_key, upload_id)
            assert len(got_parts) == len(file_parts), f"Expected {parts_count} parts, got:\n{got_parts}"

        with reporter.step("Complete multipart upload"):
            s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)

        with reporter.step("Patch multipart object"):
            s3_http_client.patch_object(bucket, object_key, content_file, content_range, timeout=200)

        with reporter.step("Get patched part of object and make sure it has changed correctly"):
            got_part = s3_client.get_object(bucket, object_key, object_range=(start, end))
            assert get_file_hash(got_part) == get_file_hash(content_file), "Expected content hash did not match actual content hash"
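    # Worked example for the ranges above with part size P: "part-100:part*2+200"
    # resolves to offsets (P - 100, 2 * P + 200), so the patch begins 100 bytes
    # before the end of part 1 and ends 200 bytes into part 3, crossing two part
    # boundaries in a single request.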

    @allure.title("Patch multipart object in versioned bucket (s3_client={s3_client}, policy={placement_policy})")
    @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
    def test_s3_patch_multipart_object_in_versioned_bucket(
        self,
        s3_client: S3ClientWrapper,
        s3_http_client: S3HttpClient,
        bucket: str,
    ):
        parts = []
        parts_count = 5
        original_size = PART_SIZE_FOR_MULTIPART * parts_count
        patch_ranges = ["0:part-1", "part:part*2-1", "part-100:part*2+200", "object-part-1:object"]

        with reporter.step("Generate original object and split it into parts"):
            original_file = generate_file(original_size)
            original_key = s3_helper.object_key_from_file_path(original_file)
            file_parts = split_file(original_file, parts_count)

        with reporter.step("Create multipart and upload parts"):
            upload_id = s3_client.create_multipart_upload(bucket, original_key)
            for part_id, file_path in enumerate(file_parts, start=1):
                etag = s3_client.upload_part(bucket, original_key, upload_id, part_id, file_path)
                parts.append((part_id, etag))

        with reporter.step("Check all parts are visible in bucket"):
            got_parts = s3_client.list_parts(bucket, original_key, upload_id)
            assert len(got_parts) == len(file_parts), f"Expected {parts_count} parts, got:\n{got_parts}"

        with reporter.step("Complete multipart upload"):
            response = s3_client.complete_multipart_upload(bucket, original_key, upload_id, parts)
            version = response["VersionId"]
            expected_versions = {version}

        with reporter.step("Patch versioned multipart object"):
            for rng in patch_ranges:
                start, end = s3_helper.get_range_relative_to_object(
                    rng, object_size=original_size, part_size=PART_SIZE_FOR_MULTIPART, int_values=True
                )
                content_size = end - start + 1
                content_range = f"bytes {start}-{end}/*"

                with reporter.step("Generate payload object"):
                    content_file = generate_file(content_size)

                with reporter.step("Patch multipart object and get new version"):
                    response = s3_http_client.patch_object(
                        bucket, original_key, content_file, content_range, version_id=version, timeout=200
                    )
                    version = response["VersionId"]
                    expected_versions.add(version)

                with reporter.step("Get patched part of object and make sure it has changed correctly"):
                    got_part = s3_client.get_object(bucket, original_key, version_id=version, object_range=(start, end))
                    assert get_file_hash(got_part) == get_file_hash(content_file), "Expected content hash did not match actual content hash"

        with reporter.step("Check that all expected versions are in bucket"):
            got_versions = {
                version.get("VersionId") for version in s3_client.list_objects_versions(bucket) if version.get("Key") == original_key
            }
            assert expected_versions == got_versions, f"Expected versions of object are missing from bucket: {expected_versions}"

    # TODO: Negative scenario for SSE objects is postponed for now.

@@ -2,7 +2,9 @@ import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.s3.boto3_client import Boto3ClientWrapper
from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.s3.s3_http_client import S3HttpClient
from frostfs_testlib.steps.cli.container import list_objects
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize