[#258] Speed up tests by removing cleanup and per-test healthcheck

Signed-off-by: a.berezin <a.berezin@yadro.com>
Andrey Berezin 2024-06-25 02:27:54 +03:00
parent c1759bfa08
commit e65d19f056
16 changed files with 412 additions and 800 deletions
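
The speed-up below comes from two changes: the autouse per-test healthcheck fixture is deleted from conftest.py (together with its `no_healthcheck` escape-hatch marker in pytest.ini), and the per-test bucket create/delete fixtures are replaced by a session-scoped pool that pre-creates buckets for every collected test. A minimal sketch of that pooling pattern, with hypothetical names and only stock pytest assumed:

import pytest


@pytest.fixture(scope="session")
def resource_pool(request: pytest.FixtureRequest) -> list[str]:
    # Provision once per session: count the collected tests that request
    # the resource, then create exactly that many up front.
    needed = sum("resource" in item.fixturenames for item in request.session.items)
    return [f"resource-{index}" for index in range(needed)]


@pytest.fixture
def resource(resource_pool: list[str]) -> str:
    # Hand out a pre-provisioned resource; no per-test creation or teardown.
    return resource_pool.pop() if resource_pool else "resource-fallback"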

View file

@@ -11,7 +11,6 @@ markers =
     sanity: test runs in sanity testrun
     smoke: test runs in smoke testrun
     # controlling markers
-    no_healthcheck: skip healthcheck for this test
     order: manual control of test order
     # functional markers
     maintenance: tests for change mode node
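
For context, the remaining `order` controlling marker is consumed like any registered marker; an illustrative sketch, assuming the pytest-order plugin (or an equivalent hook) backs it:

import pytest


@pytest.mark.order(1)  # assumption: pytest-order (or similar) interprets this marker
def test_runs_first():
    ...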

View file

@@ -124,30 +124,13 @@ class TestACLBasic(ClusterTestBase):
         other_wallet = wallets.get_wallet(role=EACLRole.OTHERS)
         cid = private_container
         with reporter.step("Add test objects to container"):
-            owner_object_oid = put_object_to_random_node(
-                user_wallet, file_path, cid, shell=self.shell, cluster=self.cluster
-            )
+            owner_object_oid = put_object_to_random_node(user_wallet, file_path, cid, self.shell, self.cluster)
 
-        with reporter.step("Check only owner has full access to private container"):
-            with reporter.step("Check no one except owner has access to operations with container"):
-                check_no_access_to_container(
-                    other_wallet,
-                    cid,
-                    owner_object_oid,
-                    file_path,
-                    shell=self.shell,
-                    cluster=self.cluster,
-                )
+        with reporter.step("Check no one except owner has access to operations with container"):
+            check_no_access_to_container(other_wallet, cid, owner_object_oid, file_path, self.shell, self.cluster)
 
-            with reporter.step("Check owner has full access to private container"):
-                check_full_access_to_container(
-                    user_wallet,
-                    cid,
-                    owner_object_oid,
-                    file_path,
-                    shell=self.shell,
-                    cluster=self.cluster,
-                )
+        with reporter.step("Check owner has full access to private container"):
+            check_full_access_to_container(user_wallet, cid, owner_object_oid, file_path, self.shell, self.cluster)
 
     @allure.title("Operations with basic ACL on READONLY container (obj_size={object_size})")
     def test_basic_acl_readonly(self, wallets: Wallets, client_shell: Shell, read_only_container: str, file_path: str):
@@ -159,26 +142,10 @@ class TestACLBasic(ClusterTestBase):
         cid = read_only_container
         with reporter.step("Add test objects to container"):
-            object_oid = put_object_to_random_node(
-                user_wallet, file_path, cid, shell=client_shell, cluster=self.cluster
-            )
+            object_oid = put_object_to_random_node(user_wallet, file_path, cid, client_shell, self.cluster)
 
         with reporter.step("Check other has read-only access to operations with container"):
-            check_read_only_container(
-                other_wallet,
-                cid,
-                object_oid,
-                file_path,
-                shell=client_shell,
-                cluster=self.cluster,
-            )
+            check_read_only_container(other_wallet, cid, object_oid, file_path, client_shell, self.cluster)
 
         with reporter.step("Check owner has full access to public container"):
-            check_full_access_to_container(
-                user_wallet,
-                cid,
-                object_oid,
-                file_path,
-                shell=client_shell,
-                cluster=self.cluster,
-            )
+            check_full_access_to_container(user_wallet, cid, object_oid, file_path, client_shell, self.cluster)

View file

@@ -195,7 +195,7 @@ def simple_object_size(max_object_size: int) -> ObjectSize:
 
 @pytest.fixture()
 def file_path(object_size: ObjectSize) -> str:
-    yield generate_file(object_size.value)
+    return generate_file(object_size.value)
 
 
 @pytest.fixture(scope="session")
@@ -256,8 +256,8 @@ def cluster(temp_directory: str, hosting: Hosting, client_shell: Shell) -> Cluster:
     yield cluster
 
 
-@reporter.step("[Class]: Provide S3 policy")
-@pytest.fixture(scope="class")
+@allure.title("[Session]: Provide S3 policy")
+@pytest.fixture(scope="session")
 def s3_policy(request: pytest.FixtureRequest):
     policy = None
     if "param" in request.__dict__:
@@ -296,12 +296,12 @@ def credentials_provider(cluster: Cluster) -> CredentialsProvider:
     return CredentialsProvider(cluster)
 
 
-@reporter.step("[Class]: Create S3 client")
+@allure.title("[Session]: Create S3 client")
 @pytest.fixture(
-    scope="class",
+    scope="session",
     params=[
-        pytest.param(AwsCliClient, marks=pytest.mark.aws),
-        pytest.param(Boto3ClientWrapper, marks=pytest.mark.boto3),
+        pytest.param(AwsCliClient, marks=[pytest.mark.aws, pytest.mark.weekly]),
+        pytest.param(Boto3ClientWrapper, marks=[pytest.mark.boto3, pytest.mark.nightly]),
     ],
 )
 def s3_client(
@@ -329,31 +329,60 @@ def versioning_status(request: pytest.FixtureRequest) -> VersioningStatus:
     return VersioningStatus.UNDEFINED
 
 
-@reporter.step("Create/delete bucket")
-@pytest.fixture
-def bucket(s3_client: S3ClientWrapper, versioning_status: VersioningStatus, request: pytest.FixtureRequest):
-    bucket_name = s3_client.create_bucket()
+def unique_name(prefix: str) -> str:
+    return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}"
+
+
+@allure.title("[Session] Bulk create buckets for tests")
+@pytest.fixture(scope="session")
+def buckets_pool(s3_client: S3ClientWrapper, request: pytest.FixtureRequest):
+    test_buckets: list = []
+
+    s3_client_type = type(s3_client).__name__
+    for test in request.session.items:
+        if s3_client_type not in test.name:
+            continue
+
+        if "bucket" in test.fixturenames:
+            test_buckets.append(unique_name("bucket-"))
+
+        if "two_buckets" in test.fixturenames:
+            test_buckets.append(unique_name("bucket-"))
+            test_buckets.append(unique_name("bucket-"))
+
+    if test_buckets:
+        parallel(s3_client.create_bucket, test_buckets)
+
+    return test_buckets
+
+
+@allure.title("[Test] Create bucket")
+@pytest.fixture
+def bucket(buckets_pool: list[str], s3_client: S3ClientWrapper, versioning_status: VersioningStatus):
+    if buckets_pool:
+        bucket_name = buckets_pool.pop()
+    else:
+        bucket_name = s3_client.create_bucket()
 
     if versioning_status:
         s3_helper.set_bucket_versioning(s3_client, bucket_name, versioning_status)
 
-    yield bucket_name
-
-    if "sanity" not in request.config.option.markexpr:
-        s3_helper.delete_bucket_with_objects(s3_client, bucket_name)
+    return bucket_name
 
 
-@reporter.step("Create two buckets")
+@allure.title("[Test] Create two buckets")
 @pytest.fixture
-def two_buckets(s3_client: S3ClientWrapper, request: pytest.FixtureRequest):
-    bucket_1 = s3_client.create_bucket()
-    bucket_2 = s3_client.create_bucket()
-
-    yield bucket_1, bucket_2
-
-    if "sanity" not in request.config.option.markexpr:
-        for bucket_name in [bucket_1, bucket_2]:
-            s3_helper.delete_bucket_with_objects(s3_client, bucket_name)
+def two_buckets(buckets_pool: list[str], s3_client: S3ClientWrapper) -> list[str]:
+    buckets: list[str] = []
+
+    for _ in range(2):
+        if buckets_pool:
+            buckets.append(buckets_pool.pop())
+        else:
+            buckets.append(s3_client.create_bucket())
+
+    return buckets
 
 
 @allure.title("[Autouse/Session] Collect binary versions")
@@ -438,15 +467,6 @@ def readiness_on_node(cluster_node: ClusterNode):
     ), f"Service should be in active state more than {SERVICE_ACTIVE_TIME} minutes, current {active_minutes}m:{active_seconds}s"
 
 
-@allure.title("[Autouse/Test] Run health check for all nodes")
-@pytest.fixture(autouse=True)
-def run_health_check(healthcheck: Healthcheck, cluster: Cluster, request: pytest.FixtureRequest):
-    if request.node.get_closest_marker("no_healthcheck"):
-        # Skip healthcheck for tests marked with no_healthcheck
-        return
-    parallel(healthcheck.storage_healthcheck, cluster.cluster_nodes)
-
-
 @reporter.step("Prepare default user with wallet")
 @pytest.fixture(scope="session")
 def default_user(credentials_provider: CredentialsProvider, cluster: Cluster) -> User:
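
For reference, `parallel` in the `buckets_pool` fixture above comes from frostfs_testlib; a rough stand-in, assuming it fans the callable out over the argument list on a thread pool and waits for every call to finish:

from concurrent.futures import ThreadPoolExecutor
from typing import Callable, TypeVar

T = TypeVar("T")
R = TypeVar("R")


def parallel_sketch(func: Callable[[T], R], args: list[T]) -> list[R]:
    # Run func(arg) concurrently for each arg; consuming the results
    # re-raises the first worker exception, so failures are not swallowed.
    with ThreadPoolExecutor() as executor:
        return list(executor.map(func, args))


# Usage mirroring the fixture above: parallel_sketch(s3_client.create_bucket, test_buckets)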

View file

@@ -96,15 +96,6 @@ class TestFailoverServer(ClusterTestBase):
 
         yield object_list
 
-    @reporter.step("Select random node to stop and start it after test")
-    @pytest.fixture
-    def node_to_stop(
-        self, node_under_test: ClusterNode, cluster_state_controller: ClusterStateController
-    ) -> ClusterNode:
-        yield node_under_test
-        with reporter.step(f"start {node_under_test.storage_node}"):
-            cluster_state_controller.start_stopped_hosts()
-
     @reporter.step("Upload object with nodes and compare")
     def get_corrupted_objects_list(
         self, nodes: list[StorageNode], storage_objects: list[StorageObjectInfo]
@@ -155,21 +146,16 @@ class TestFailoverServer(ClusterTestBase):
         )
         return object_info, object_nodes
 
-    @pytest.fixture()
-    def up_stop_nodes(self, cluster_state_controller: ClusterStateController):
-        yield
-        cluster_state_controller.start_stopped_hosts()
-
     @allure.title("Full shutdown node")
     @pytest.mark.parametrize("containers, storage_objects", [(5, 10)], indirect=True)
     def test_complete_node_shutdown(
         self,
         storage_objects: list[StorageObjectInfo],
-        node_to_stop: ClusterNode,
+        node_under_test: ClusterNode,
         cluster_state_controller: ClusterStateController,
     ):
-        with reporter.step(f"Remove {node_to_stop} from the list of nodes"):
-            alive_nodes = list(set(self.cluster.cluster_nodes) - {node_to_stop})
+        with reporter.step(f"Remove {node_under_test} from the list of nodes"):
+            alive_nodes = list(set(self.cluster.cluster_nodes) - {node_under_test})
 
         storage_nodes = [cluster.storage_node for cluster in alive_nodes]
@@ -177,22 +163,22 @@ class TestFailoverServer(ClusterTestBase):
         self.tick_epochs(1, storage_nodes[0], wait_block=2)
 
         with reporter.step(f"Stop node"):
-            cluster_state_controller.stop_node_host(node=node_to_stop, mode="hard")
+            cluster_state_controller.stop_node_host(node=node_under_test, mode="hard")
 
         with reporter.step("Verify that there are no corrupted objects"):
             corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
             assert not corrupted_objects_list
 
-        with reporter.step(f"check {node_to_stop.storage_node} in map"):
-            self.wait_node_in_map(node_to_stop.storage_node, self.shell, alive_node=storage_nodes[0])
+        with reporter.step(f"check {node_under_test.storage_node} in map"):
+            self.wait_node_in_map(node_under_test.storage_node, self.shell, alive_node=storage_nodes[0])
 
         count_tick_epoch = int(alive_nodes[0].ir_node.get_netmap_cleaner_threshold()) + 4
 
         with reporter.step(f"Tick {count_tick_epoch} epochs and wait for 2 blocks"):
             self.tick_epochs(count_tick_epoch, storage_nodes[0], wait_block=2)
 
-        with reporter.step(f"Check {node_to_stop} in not map"):
-            self.wait_node_not_in_map(node_to_stop.storage_node, self.shell, alive_node=storage_nodes[0])
+        with reporter.step(f"Check {node_under_test} in not map"):
+            self.wait_node_not_in_map(node_under_test.storage_node, self.shell, alive_node=storage_nodes[0])
 
         with reporter.step(f"Verify that there are no corrupted objects after {count_tick_epoch} epoch"):
             corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
@@ -203,26 +189,26 @@ class TestFailoverServer(ClusterTestBase):
     def test_temporarily_disable_a_node(
         self,
         storage_objects: list[StorageObjectInfo],
-        node_to_stop: ClusterNode,
+        node_under_test: ClusterNode,
         cluster_state_controller: ClusterStateController,
     ):
-        with reporter.step(f"Remove {node_to_stop} from the list of nodes"):
-            storage_nodes = list(set(self.cluster.storage_nodes) - {node_to_stop.storage_node})
+        with reporter.step(f"Remove {node_under_test} from the list of nodes"):
+            storage_nodes = list(set(self.cluster.storage_nodes) - {node_under_test.storage_node})
 
         with reporter.step("Tick epoch and wait for 2 blocks"):
             self.tick_epochs(1, storage_nodes[0], wait_block=2)
 
         with reporter.step(f"Stop node"):
-            cluster_state_controller.stop_node_host(node_to_stop, "hard")
+            cluster_state_controller.stop_node_host(node_under_test, "hard")
 
         with reporter.step("Verify that there are no corrupted objects"):
             corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
             assert not corrupted_objects_list
 
-        with reporter.step(f"Check {node_to_stop} in map"):
-            self.wait_node_in_map(node_to_stop.storage_node, self.shell, alive_node=storage_nodes[0])
+        with reporter.step(f"Check {node_under_test} in map"):
+            self.wait_node_in_map(node_under_test.storage_node, self.shell, alive_node=storage_nodes[0])
 
-        cluster_state_controller.start_node_host(node_to_stop)
+        cluster_state_controller.start_node_host(node_under_test)
 
         with reporter.step("Verify that there are no corrupted objects"):
             corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
@@ -235,7 +221,6 @@ class TestFailoverServer(ClusterTestBase):
         default_wallet: WalletInfo,
         cluster_state_controller: ClusterStateController,
         simple_file: str,
-        up_stop_nodes: None,
     ):
         object_info, object_nodes = object_and_nodes
         endpoint_without_object = list(set(self.cluster.cluster_nodes) - set(object_nodes))[
@@ -262,7 +247,6 @@ class TestFailoverServer(ClusterTestBase):
         default_wallet: WalletInfo,
         cluster_state_controller: ClusterStateController,
         simple_file: str,
-        up_stop_nodes: None,
     ):
         with reporter.step("Create container with full network map"):
             node_count = len(self.cluster.cluster_nodes)
@@ -282,12 +266,12 @@ class TestFailoverServer(ClusterTestBase):
             object_nodes = get_object_nodes(self.cluster, cid, oid, self.cluster.cluster_nodes[0])
 
         with reporter.step("Choose node to stop"):
-            node_to_stop = random.choice(object_nodes)
-            alive_node_with_object = random.choice(list(set(object_nodes) - {node_to_stop}))
+            node_under_test = random.choice(object_nodes)
+            alive_node_with_object = random.choice(list(set(object_nodes) - {node_under_test}))
             alive_endpoint_with_object = alive_node_with_object.storage_node.get_rpc_endpoint()
 
         with reporter.step("Stop random node with object"):
-            cluster_state_controller.stop_node_host(node_to_stop, "hard")
+            cluster_state_controller.stop_node_host(node_under_test, "hard")
 
         with reporter.step("Put object to alive node with object"):
             oid_2 = put_object(default_wallet, simple_file, cid, self.shell, alive_endpoint_with_object)

View file

@@ -45,20 +45,6 @@ def file_keeper():
     keeper.restore_files()
 
 
-@reporter.step("Return all stopped hosts")
-@pytest.fixture(scope="function", autouse=True)
-def after_run_return_all_stopped_hosts(cluster_state_controller: ClusterStateController) -> str:
-    yield
-    cluster_state_controller.start_stopped_hosts()
-
-
-@reporter.step("Return all stopped services after test")
-@pytest.fixture(scope="function")
-def after_run_return_all_stopped_services(cluster_state_controller: ClusterStateController):
-    yield
-    cluster_state_controller.start_all_stopped_services()
-
-
 @pytest.mark.failover
 @pytest.mark.failover_storage
 class TestFailoverStorage(ClusterTestBase):
@@ -130,7 +116,6 @@ class TestFailoverStorage(ClusterTestBase):
         default_wallet: WalletInfo,
         simple_object_size: ObjectSize,
         cluster_state_controller: ClusterStateController,
-        after_run_return_all_stopped_services,
     ):
         default_node = self.cluster.cluster_nodes[0]
@@ -223,9 +208,6 @@ class TestEmptyMap(ClusterTestBase):
         file_name = s3_helper.object_key_from_file_path(file_path)
         bucket_objects = [file_name]
 
-        objects_list = s3_client.list_objects(bucket)
-        assert not objects_list, f"Expected empty bucket, got {objects_list}"
-
         with reporter.step("Put object into bucket"):
             s3_client.put_object(bucket, file_path)
@@ -286,9 +268,6 @@ class TestEmptyMap(ClusterTestBase):
         file_name = s3_helper.object_key_from_file_path(file_path)
         bucket_objects = [file_name]
 
-        objects_list = s3_client.list_objects(bucket)
-        assert not objects_list, f"Expected empty bucket, got {objects_list}"
-
         with reporter.step("Put object into bucket"):
             s3_client.put_object(bucket, file_path)
@@ -337,8 +316,8 @@ class TestEmptyMap(ClusterTestBase):
         s3_client: S3ClientWrapper,
         simple_object_size: ObjectSize,
         cluster_state_controller: ClusterStateController,
+        bucket: str,
     ):
-        bucket = s3_client.create_bucket()
         s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
 
         file_path = generate_file(simple_object_size.value)
@@ -380,9 +359,8 @@ class TestEmptyMap(ClusterTestBase):
         s3_client: S3ClientWrapper,
         simple_object_size: ObjectSize,
         cluster_state_controller: ClusterStateController,
+        bucket: str,
     ):
-        bucket = s3_client.create_bucket()
-
         file_path = generate_file(simple_object_size.value)
         file_name = s3_helper.object_key_from_file_path(file_path)
@@ -422,8 +400,8 @@ class TestEmptyMap(ClusterTestBase):
         simple_object_size: ObjectSize,
         versioning_status: VersioningStatus,
         cluster_state_controller: ClusterStateController,
+        bucket: str,
     ):
-        bucket = s3_client.create_bucket()
         s3_helper.set_bucket_versioning(s3_client, bucket, versioning_status)
 
         file_path = generate_file(simple_object_size.value)
@@ -469,14 +447,9 @@ class TestStorageDataLoss(ClusterTestBase):
         simple_object_size: ObjectSize,
         complex_object_size: ObjectSize,
         cluster_state_controller: ClusterStateController,
-        after_run_return_all_stopped_services: str,
         file_keeper: FileKeeper,
+        bucket: str,
     ):
-        allure.dynamic.description(after_run_return_all_stopped_services)
-
-        with reporter.step("Create bucket"):
-            bucket = s3_client.create_bucket()
-
         with reporter.step("Put objects into bucket"):
             simple_object_path = generate_file(simple_object_size.value)
             simple_object_key = s3_helper.object_key_from_file_path(simple_object_path)
@@ -538,10 +511,8 @@ class TestStorageDataLoss(ClusterTestBase):
         shards_watcher: ShardsWatcher,
         default_wallet: WalletInfo,
         test_start_time: datetime,
-        after_run_return_all_stopped_services: str,
     ):
         exception_messages = []
-        allure.dynamic.description(after_run_return_all_stopped_services)
 
         with reporter.step(f"Create container on node {node_under_test}"):
             locode = node_under_test.storage_node.get_un_locode()
@@ -622,7 +593,6 @@ class TestStorageDataLoss(ClusterTestBase):
         bucket,
         s3_client: S3ClientWrapper,
         simple_object_size: ObjectSize,
-        after_run_return_all_stopped_services,
         cluster_state_controller: ClusterStateController,
     ):
         # TODO: need to check that s3 gate is connected to localhost (such metric will be supported in 1.3)

View file

@@ -1,7 +1,7 @@
 import logging
 import random
 from time import sleep
-from typing import Optional, Tuple
+from typing import Callable, Optional, Tuple
 
 import allure
 import pytest
@@ -98,11 +98,6 @@ class TestNodeManagement(ClusterTestBase):
                 continue
             return
 
-    @pytest.fixture
-    def after_run_start_all_nodes(self):
-        yield
-        self.return_nodes()
-
     @pytest.fixture
     def return_nodes_after_test_run(self):
         yield
@@ -312,7 +307,7 @@ class TestNodeManagement(ClusterTestBase):
             self.return_nodes(alive_node)
 
     @reporter.step("Wait for object to be dropped")
-    def wait_for_obj_dropped(self, wallet: str, cid: str, oid: str, endpoint: str, checker) -> None:
+    def wait_for_obj_dropped(self, wallet: str, cid: str, oid: str, endpoint: str, checker: Callable) -> None:
         for _ in range(3):
             try:
                 checker(wallet, cid, oid, shell=self.shell, endpoint=endpoint)
@@ -360,8 +355,6 @@ class TestMaintenanceMode(ClusterTestBase):
         for node_to_restore in nodes_to_restore:
             cluster_state_controller.set_node_status(node_to_restore, default_wallet, NodeStatus.ONLINE)
 
-        self.tick_epoch(wait_block=2)
-
     def check_node_status(
         self, expected_status: NodeStatus, node_under_test: ClusterNode, frostfs_cli: FrostfsCli, rpc_endpoint: str
     ):
@@ -407,7 +400,6 @@ class TestMaintenanceMode(ClusterTestBase):
         node_under_test = nodes_with_container[0]
         endpoint = node_under_test.storage_node.get_rpc_endpoint()
 
-        restore_node_status.append(node_under_test)
         file_path = generate_file(simple_object_size.value)
         oid = put_object(
@@ -417,7 +409,9 @@ class TestMaintenanceMode(ClusterTestBase):
             shell=self.shell,
             endpoint=endpoint,
         )
+
         with reporter.step("Set node status to 'maintenance'"):
+            restore_node_status.append(node_under_test)
             cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.MAINTENANCE)
 
         node_under_maintenance_error = "node is under maintenance"
@@ -465,15 +459,9 @@ class TestMaintenanceMode(ClusterTestBase):
         alive_storage_node = alive_nodes[0].storage_node
         alive_rpc_endpoint = alive_storage_node.get_rpc_endpoint()
 
-        with reporter.step("Tick epoch"):
-            self.tick_epochs(2, alive_storage_node, 2)
-
         with reporter.step("Set node status to 'offline'"):
             cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.OFFLINE)
 
-        with reporter.step("Tick epoch to update the network map"):
-            self.tick_epochs(2, alive_storage_node, 2)
-
         with reporter.step("Check node status is 'offline' after update the network map"):
             self.check_node_status(NodeStatus.OFFLINE, node_under_test, frostfs_cli, alive_rpc_endpoint)
@@ -487,9 +475,6 @@ class TestMaintenanceMode(ClusterTestBase):
         with reporter.step("Check node status is 'online' after storage service restart"):
             self.check_node_status(NodeStatus.ONLINE, node_under_test, frostfs_cli, alive_rpc_endpoint)
 
-        with reporter.step("Tick 2 epochs"):
-            self.tick_epochs(2, alive_storage_node, 2)
-
         with reporter.step("Set node status to 'maintenance'"):
             cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.MAINTENANCE)
@@ -503,9 +488,6 @@ class TestMaintenanceMode(ClusterTestBase):
         with reporter.step("Check node staus is 'maintenance' after storage service restart"):
             self.check_node_status(NodeStatus.MAINTENANCE, node_under_test, frostfs_cli, alive_rpc_endpoint)
 
-        with reporter.step("Tick 2 epochs"):
-            self.tick_epochs(2, alive_storage_node, 2)
-
         with reporter.step("Set node status to 'offline'"):
             cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.OFFLINE)
@@ -518,18 +500,12 @@ class TestMaintenanceMode(ClusterTestBase):
         with reporter.step("Start storage service"):
             cluster_state_controller.start_storage_service(node_under_test)
 
-        with reporter.step("Check node status is 'offline' after storage service start"):
-            self.check_node_status(NodeStatus.OFFLINE, node_under_test, frostfs_cli, alive_rpc_endpoint)
-
         with reporter.step("Tick 2 epochs"):
             self.tick_epochs(2, alive_storage_node, 2)
 
         with reporter.step("Check node status is 'online' after storage service start"):
             self.check_node_status(NodeStatus.ONLINE, node_under_test, frostfs_cli, alive_rpc_endpoint)
 
-        with reporter.step("Tick 2 epochs"):
-            self.tick_epochs(2, alive_storage_node, 2)
-
         with reporter.step("Set node status to 'maintenance'"):
             cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.MAINTENANCE)
@@ -575,6 +551,3 @@ class TestMaintenanceMode(ClusterTestBase):
 
         with reporter.step("Set node status to 'maintenance'"):
             cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.MAINTENANCE)
-
-        with reporter.step("Set node status to 'online'"):
-            cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.ONLINE)

View file

@@ -5,8 +5,8 @@ import allure
 import pytest
 from frostfs_testlib import reporter
 from frostfs_testlib.shell import Shell
-from frostfs_testlib.steps.cli.container import create_container, delete_container
-from frostfs_testlib.steps.cli.object import delete_object, head_object, put_object
+from frostfs_testlib.steps.cli.container import create_container
+from frostfs_testlib.steps.cli.object import head_object, put_object
 from frostfs_testlib.storage.cluster import Cluster
 from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
@@ -25,11 +25,6 @@ WAIT_FOR_REPLICATION = 60
 @pytest.mark.failover
 @pytest.mark.replication
 class TestReplication(ClusterTestBase):
-    @pytest.fixture(autouse=True)
-    def start_stopped_nodes_after_test(self, cluster_state_controller: ClusterStateController):
-        yield
-        cluster_state_controller.start_stopped_hosts()
-
     @allure.title("Replication (obj_size={object_size})")
     def test_replication(
         self,

View file

@@ -3,7 +3,7 @@ import pytest
 from frostfs_testlib import reporter
 from frostfs_testlib.resources.error_patterns import S3_BUCKET_DOES_NOT_ALLOW_ACL
 from frostfs_testlib.resources.s3_acl_grants import PRIVATE_GRANTS, PUBLIC_READ_GRANTS, PUBLIC_READ_WRITE_GRANTS
-from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
+from frostfs_testlib.s3 import S3ClientWrapper
 from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.utils.file_utils import generate_file
@@ -13,7 +13,6 @@ from frostfs_testlib.utils.file_utils import generate_file
 @pytest.mark.s3_gate
 class TestS3GateACL:
     @allure.title("Object ACL (s3_client={s3_client})")
-    @pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True)
     def test_s3_object_ACL(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
         file_path = generate_file(simple_object_size.value)
         file_name = s3_helper.object_key_from_file_path(file_path)
@@ -29,8 +28,24 @@ class TestS3GateACL:
             with pytest.raises(Exception, match=S3_BUCKET_DOES_NOT_ALLOW_ACL):
                 object_grants = s3_client.put_object_acl(bucket, file_name, acl="public-read")
 
+    @allure.title("Create Bucket with different ACL (s3_client={s3_client})")
+    def test_s3_create_bucket_with_ACL(self, s3_client: S3ClientWrapper):
+        with reporter.step("Create bucket with ACL private"):
+            bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="private")
+            bucket_grants = s3_client.get_bucket_acl(bucket)
+            s3_helper.verify_acl_permissions(bucket_grants, PRIVATE_GRANTS)
+
+        with reporter.step("Create bucket with ACL public-read"):
+            read_bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read")
+            bucket_grants = s3_client.get_bucket_acl(read_bucket)
+            s3_helper.verify_acl_permissions(bucket_grants, PUBLIC_READ_GRANTS)
+
+        with reporter.step("Create bucket with ACL public-read-write"):
+            public_rw_bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read-write")
+            bucket_grants = s3_client.get_bucket_acl(public_rw_bucket)
+            s3_helper.verify_acl_permissions(bucket_grants, PUBLIC_READ_WRITE_GRANTS)
+
     @allure.title("Bucket ACL (s3_client={s3_client})")
-    @pytest.mark.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
     def test_s3_bucket_ACL(self, s3_client: S3ClientWrapper):
         with reporter.step("Create bucket with public-read-write ACL"):
             bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read-write")

View file

@@ -3,8 +3,7 @@ from datetime import datetime, timedelta
 import allure
 import pytest
 from frostfs_testlib import reporter
-from frostfs_testlib.resources.s3_acl_grants import PRIVATE_GRANTS, PUBLIC_READ_GRANTS, PUBLIC_READ_WRITE_GRANTS
-from frostfs_testlib.s3 import S3ClientWrapper
+from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
 from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.utils.file_utils import generate_file
@@ -13,23 +12,77 @@ from frostfs_testlib.utils.file_utils import generate_file
 @pytest.mark.s3_gate
 @pytest.mark.s3_gate_bucket
 class TestS3GateBucket:
-    @allure.title("Create Bucket with different ACL (s3_client={s3_client})")
-    def test_s3_create_bucket_with_ACL(self, s3_client: S3ClientWrapper):
-        with reporter.step("Create bucket with ACL private"):
-            bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="private")
-            bucket_grants = s3_client.get_bucket_acl(bucket)
-            s3_helper.verify_acl_permissions(bucket_grants, PRIVATE_GRANTS)
-
-        with reporter.step("Create bucket with ACL public-read"):
-            read_bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read")
-            bucket_grants = s3_client.get_bucket_acl(read_bucket)
-            s3_helper.verify_acl_permissions(bucket_grants, PUBLIC_READ_GRANTS)
-
-        with reporter.step("Create bucket with ACL public-read-write"):
-            public_rw_bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read-write")
-            bucket_grants = s3_client.get_bucket_acl(public_rw_bucket)
-            s3_helper.verify_acl_permissions(bucket_grants, PUBLIC_READ_WRITE_GRANTS)
+    @allure.title("Bucket API (s3_client={s3_client})")
+    def test_s3_buckets(
+        self,
+        s3_client: S3ClientWrapper,
+        simple_object_size: ObjectSize,
+    ):
+        """
+        Test base S3 Bucket API (Create/List/Head/Delete).
+        """
+        file_path = generate_file(simple_object_size.value)
+        file_name = s3_helper.object_key_from_file_path(file_path)
+
+        with reporter.step("Create buckets"):
+            bucket_1 = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
+            s3_helper.set_bucket_versioning(s3_client, bucket_1, VersioningStatus.ENABLED)
+            bucket_2 = s3_client.create_bucket()
+
+        with reporter.step("Check buckets are presented in the system"):
+            buckets = s3_client.list_buckets()
+            assert bucket_1 in buckets, f"Expected bucket {bucket_1} is in the list"
+            assert bucket_2 in buckets, f"Expected bucket {bucket_2} is in the list"
+
+        with reporter.step("Bucket must be empty"):
+            for bucket in (bucket_1, bucket_2):
+                with reporter.step("Verify default list command"):
+                    objects_list = s3_client.list_objects(bucket)
+                    assert not objects_list, f"Expected empty bucket, got {objects_list}"
+
+                with reporter.step("Verify V2 list command"):
+                    objects_list = s3_client.list_objects_v2(bucket)
+                    assert not objects_list, f"Expected empty bucket, got {objects_list}"
+
+        with reporter.step("Check buckets are visible with S3 head command"):
+            s3_client.head_bucket(bucket_1)
+            s3_client.head_bucket(bucket_2)
+
+        with reporter.step("Check we can put/list object with S3 commands"):
+            version_id = s3_client.put_object(bucket_1, file_path)
+            s3_client.head_object(bucket_1, file_name)
+
+            bucket_objects = s3_client.list_objects(bucket_1)
+            assert file_name in bucket_objects, f"Expected file {file_name} in objects list {bucket_objects}"
+
+        with reporter.step("Try to delete not empty bucket and get error"):
+            with pytest.raises(Exception, match=r".*The bucket you tried to delete is not empty.*"):
+                s3_client.delete_bucket(bucket_1)
+
+            s3_client.head_bucket(bucket_1)
+
+        with reporter.step(f"Delete empty bucket {bucket_2}"):
+            s3_client.delete_bucket(bucket_2)
+
+        with reporter.step(f"Check bucket {bucket_2} deleted"):
+            with pytest.raises(Exception, match=r".*Not Found.*"):
+                s3_client.head_bucket(bucket_2)
+
+            buckets = s3_client.list_buckets()
+            assert bucket_1 in buckets, f"Expected bucket {bucket_1} is in the list"
+            assert bucket_2 not in buckets, f"Expected bucket {bucket_2} is not in the list"
+
+        with reporter.step(f"Delete object from {bucket_1}"):
+            s3_client.delete_object(bucket_1, file_name, version_id)
+            s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=[])
+
+        with reporter.step(f"Delete bucket {bucket_1}"):
+            s3_client.delete_bucket(bucket_1)
+
+        with reporter.step(f"Check bucket {bucket_1} deleted"):
+            with pytest.raises(Exception, match=r".*Not Found.*"):
+                s3_client.head_bucket(bucket_1)
 
     @allure.title("Create bucket with object lock (s3_client={s3_client})")
     def test_s3_bucket_object_lock(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize):

View file

@@ -1,516 +0,0 @@
import logging
import os
from random import choice, choices
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.s3 import AwsCliClient, S3ClientWrapper, VersioningStatus
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.epoch import tick_epoch
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import (
TestFile,
generate_file,
generate_file_with_content,
get_file_content,
get_file_hash,
split_file,
)
logger = logging.getLogger("NeoLogger")
@allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw#frostfs-s3-gw", name="frostfs-s3-gateway")
@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.s3_gate_base
class TestS3Gate:
@allure.title("Bucket API (s3_client={s3_client})")
def test_s3_buckets(
self,
s3_client: S3ClientWrapper,
client_shell: Shell,
cluster: Cluster,
simple_object_size: ObjectSize,
):
"""
Test base S3 Bucket API (Create/List/Head/Delete).
"""
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
with reporter.step("Create buckets"):
bucket_1 = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
s3_helper.set_bucket_versioning(s3_client, bucket_1, VersioningStatus.ENABLED)
bucket_2 = s3_client.create_bucket()
with reporter.step("Check buckets are presented in the system"):
buckets = s3_client.list_buckets()
assert bucket_1 in buckets, f"Expected bucket {bucket_1} is in the list"
assert bucket_2 in buckets, f"Expected bucket {bucket_2} is in the list"
with reporter.step("Bucket must be empty"):
for bucket in (bucket_1, bucket_2):
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with reporter.step("Check buckets are visible with S3 head command"):
s3_client.head_bucket(bucket_1)
s3_client.head_bucket(bucket_2)
with reporter.step("Check we can put/list object with S3 commands"):
version_id = s3_client.put_object(bucket_1, file_path)
s3_client.head_object(bucket_1, file_name)
bucket_objects = s3_client.list_objects(bucket_1)
assert file_name in bucket_objects, f"Expected file {file_name} in objects list {bucket_objects}"
with reporter.step("Try to delete not empty bucket and get error"):
with pytest.raises(Exception, match=r".*The bucket you tried to delete is not empty.*"):
s3_client.delete_bucket(bucket_1)
s3_client.head_bucket(bucket_1)
with reporter.step(f"Delete empty bucket {bucket_2}"):
s3_client.delete_bucket(bucket_2)
tick_epoch(client_shell, cluster)
with reporter.step(f"Check bucket {bucket_2} deleted"):
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_client.head_bucket(bucket_2)
buckets = s3_client.list_buckets()
assert bucket_1 in buckets, f"Expected bucket {bucket_1} is in the list"
assert bucket_2 not in buckets, f"Expected bucket {bucket_2} is not in the list"
with reporter.step(f"Delete object from {bucket_1}"):
s3_client.delete_object(bucket_1, file_name, version_id)
s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=[])
with reporter.step(f"Delete bucket {bucket_1}"):
s3_client.delete_bucket(bucket_1)
tick_epoch(client_shell, cluster)
with reporter.step(f"Check bucket {bucket_1} deleted"):
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_client.head_bucket(bucket_1)
@allure.title("Object API (obj_size={object_size}, s3_client={s3_client})")
@pytest.mark.parametrize(
"object_size",
["simple", "complex"],
indirect=True,
)
def test_s3_api_object(
self,
s3_client: S3ClientWrapper,
object_size: ObjectSize,
two_buckets: tuple[str, str],
):
"""
Test base S3 Object API (Put/Head/List) for simple and complex objects.
"""
file_path = generate_file(object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_1, bucket_2 = two_buckets
for bucket in (bucket_1, bucket_2):
with reporter.step("Bucket must be empty"):
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
s3_client.put_object(bucket, file_path)
s3_client.head_object(bucket, file_name)
bucket_objects = s3_client.list_objects(bucket)
assert file_name in bucket_objects, f"Expected file {file_name} in objects list {bucket_objects}"
with reporter.step("Check object's attributes"):
for attrs in (["ETag"], ["ObjectSize", "StorageClass"]):
s3_client.get_object_attributes(bucket, file_name, attrs)
@allure.title("Sync directory (s3_client={s3_client})")
@pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True)
def test_s3_sync_dir(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
"""
Test checks sync directory with AWS CLI utility.
"""
test_file_1 = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_1"))
test_file_2 = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_2"))
key_to_path = {"test_file_1": test_file_1.path, "test_file_2": test_file_2.path}
generate_file_with_content(simple_object_size.value, test_file_1)
generate_file_with_content(simple_object_size.value, test_file_2)
s3_client.sync(bucket, os.path.dirname(test_file_1))
with reporter.step("Check objects are synced"):
objects = s3_client.list_objects(bucket)
with reporter.step("Check these are the same objects"):
assert set(key_to_path.keys()) == set(objects), f"Expected all objects saved. Got {objects}"
for obj_key in objects:
got_object = s3_client.get_object(bucket, obj_key)
assert get_file_hash(got_object) == get_file_hash(
key_to_path.get(obj_key)
), "Expected hashes are the same"
@allure.title("Object versioning (s3_client={s3_client})")
def test_s3_api_versioning(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
"""
Test checks basic versioning functionality for S3 bucket.
"""
version_1_content = "Version 1"
version_2_content = "Version 2"
file_name_simple = generate_file_with_content(simple_object_size.value, content=version_1_content)
obj_key = os.path.basename(file_name_simple)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with reporter.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_name_simple)
generate_file_with_content(simple_object_size.value, file_path=file_name_simple, content=version_2_content)
version_id_2 = s3_client.put_object(bucket, file_name_simple)
with reporter.step("Check bucket shows all versions"):
versions = s3_client.list_objects_versions(bucket)
obj_versions = {version.get("VersionId") for version in versions if version.get("Key") == obj_key}
assert obj_versions == {
version_id_1,
version_id_2,
}, f"Expected object has versions: {version_id_1, version_id_2}"
with reporter.step("Show information about particular version"):
for version_id in (version_id_1, version_id_2):
response = s3_client.head_object(bucket, obj_key, version_id=version_id)
assert "LastModified" in response, "Expected LastModified field"
assert "ETag" in response, "Expected ETag field"
assert response.get("VersionId") == version_id, f"Expected VersionId is {version_id}"
assert response.get("ContentLength") != 0, "Expected ContentLength is not zero"
with reporter.step("Check object's attributes"):
for version_id in (version_id_1, version_id_2):
got_attrs = s3_client.get_object_attributes(bucket, obj_key, ["ETag"], version_id=version_id)
if got_attrs:
assert got_attrs.get("VersionId") == version_id, f"Expected VersionId is {version_id}"
with reporter.step("Delete object and check it was deleted"):
response = s3_client.delete_object(bucket, obj_key)
version_id_delete = response.get("VersionId")
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_client.head_object(bucket, obj_key)
with reporter.step("Get content for all versions and check it is correct"):
for version, content in (
(version_id_2, version_2_content),
(version_id_1, version_1_content),
):
file_name = s3_client.get_object(bucket, obj_key, version_id=version)
got_content = get_file_content(file_name)
assert got_content == content, f"Expected object content is\n{content}\nGot\n{got_content}"
with reporter.step("Restore previous object version"):
s3_client.delete_object(bucket, obj_key, version_id=version_id_delete)
file_name = s3_client.get_object(bucket, obj_key)
got_content = get_file_content(file_name)
assert (
got_content == version_2_content
), f"Expected object content is\n{version_2_content}\nGot\n{got_content}"
@pytest.mark.s3_gate_multipart
@allure.title("Object Multipart API (s3_client={s3_client})")
def test_s3_api_multipart(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
"""
Test checks S3 Multipart API (Create multipart upload/Abort multipart upload/List multipart upload/
Upload part/List parts/Complete multipart upload).
"""
parts_count = 3
file_name_large = generate_file(simple_object_size.value * 1024 * 6 * parts_count) # 5Mb - min part
object_key = s3_helper.object_key_from_file_path(file_name_large)
part_files = split_file(file_name_large, parts_count)
parts = []
uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected there is no uploads in bucket {bucket}"
with reporter.step("Create and abort multipart upload"):
upload_id = s3_client.create_multipart_upload(bucket, object_key)
uploads = s3_client.list_multipart_uploads(bucket)
assert uploads, f"Expected there one upload in bucket {bucket}"
assert uploads[0].get("Key") == object_key, f"Expected correct key {object_key} in upload {uploads}"
assert uploads[0].get("UploadId") == upload_id, f"Expected correct UploadId {upload_id} in upload {uploads}"
s3_client.abort_multipart_upload(bucket, object_key, upload_id)
uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected there is no uploads in bucket {bucket}"
with reporter.step("Create new multipart upload and upload several parts"):
upload_id = s3_client.create_multipart_upload(bucket, object_key)
for part_id, file_path in enumerate(part_files, start=1):
etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
parts.append((part_id, etag))
with reporter.step("Check all parts are visible in bucket"):
got_parts = s3_client.list_parts(bucket, object_key, upload_id)
assert len(got_parts) == len(part_files), f"Expected {parts_count} parts, got\n{got_parts}"
s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected there is no uploads in bucket {bucket}"
with reporter.step("Check we can get whole object from bucket"):
got_object = s3_client.get_object(bucket, object_key)
assert get_file_hash(got_object) == get_file_hash(file_name_large)
self.check_object_attributes(s3_client, bucket, object_key, parts_count)
@allure.title("Bucket tagging API (s3_client={s3_client})")
def test_s3_api_bucket_tagging(self, s3_client: S3ClientWrapper, bucket: str):
"""
Test checks S3 Bucket tagging API (Put tag/Get tag).
"""
key_value_pair = [("some-key", "some-value"), ("some-key-2", "some-value-2")]
s3_client.put_bucket_tagging(bucket, key_value_pair)
s3_helper.check_tags_by_bucket(s3_client, bucket, key_value_pair)
s3_client.delete_bucket_tagging(bucket)
s3_helper.check_tags_by_bucket(s3_client, bucket, [])
@allure.title("Object tagging API (s3_client={s3_client})")
def test_s3_api_object_tagging(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
"""
Test checks S3 Object tagging API (Put tag/Get tag/Update tag).
"""
key_value_pair_bucket = [("some-key", "some-value"), ("some-key-2", "some-value-2")]
key_value_pair_obj = [
("some-key-obj", "some-value-obj"),
("some-key--obj2", "some-value--obj2"),
]
key_value_pair_obj_new = [("some-key-obj-new", "some-value-obj-new")]
file_name_simple = generate_file(simple_object_size.value)
obj_key = s3_helper.object_key_from_file_path(file_name_simple)
s3_client.put_bucket_tagging(bucket, key_value_pair_bucket)
s3_client.put_object(bucket, file_name_simple)
for tags in (key_value_pair_obj, key_value_pair_obj_new):
s3_client.put_object_tagging(bucket, obj_key, tags)
s3_helper.check_tags_by_object(
s3_client,
bucket,
obj_key,
tags,
)
s3_client.delete_object_tagging(bucket, obj_key)
s3_helper.check_tags_by_object(s3_client, bucket, obj_key, [])
@allure.title("Delete object & delete objects (s3_client={s3_client})")
def test_s3_api_delete(
self,
s3_client: S3ClientWrapper,
two_buckets: tuple[str, str],
simple_object_size: ObjectSize,
complex_object_size: ObjectSize,
):
"""
Check delete_object and delete_objects S3 API operation. From first bucket some objects deleted one by one.
From second bucket some objects deleted all at once.
"""
max_obj_count = 20
max_delete_objects = 17
put_objects = []
file_paths = []
obj_sizes = [simple_object_size, complex_object_size]
bucket_1, bucket_2 = two_buckets
with reporter.step(f"Generate {max_obj_count} files"):
for _ in range(max_obj_count):
file_paths.append(generate_file(choice(obj_sizes).value))
for bucket in (bucket_1, bucket_2):
with reporter.step(f"Bucket {bucket} must be empty as it just created"):
objects_list = s3_client.list_objects_v2(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
for file_path in file_paths:
s3_client.put_object(bucket, file_path)
put_objects.append(s3_helper.object_key_from_file_path(file_path))
with reporter.step(f"Check all objects put in bucket {bucket} successfully"):
bucket_objects = s3_client.list_objects_v2(bucket)
assert set(put_objects) == set(
bucket_objects
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
with reporter.step("Delete some objects from bucket_1 one by one"):
objects_to_delete_b1 = choices(put_objects, k=max_delete_objects)
for obj in objects_to_delete_b1:
s3_client.delete_object(bucket_1, obj)
with reporter.step("Check deleted objects are not visible in bucket bucket_1"):
bucket_objects = s3_client.list_objects_v2(bucket_1)
assert set(put_objects).difference(set(objects_to_delete_b1)) == set(
bucket_objects
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
for object_key in objects_to_delete_b1:
with pytest.raises(Exception, match="The specified key does not exist"):
s3_client.get_object(bucket_1, object_key)
with reporter.step("Delete some objects from bucket_2 at once"):
objects_to_delete_b2 = choices(put_objects, k=max_delete_objects)
s3_client.delete_objects(bucket_2, objects_to_delete_b2)
with reporter.step("Check deleted objects are not visible in bucket bucket_2"):
objects_list = s3_client.list_objects_v2(bucket_2)
assert set(put_objects).difference(set(objects_to_delete_b2)) == set(
objects_list
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
for object_key in objects_to_delete_b2:
with pytest.raises(Exception, match="The specified key does not exist"):
s3_client.get_object(bucket_2, object_key)
@allure.title("Copy object to the same bucket (s3_client={s3_client})")
def test_s3_copy_same_bucket(
self,
s3_client: S3ClientWrapper,
bucket: str,
complex_object_size: ObjectSize,
simple_object_size: ObjectSize,
):
"""
Test object can be copied to the same bucket.
#TODO: delete after test_s3_copy_object will be merge
"""
file_path_simple = generate_file(simple_object_size.value)
file_path_large = generate_file(complex_object_size.value)
file_name_simple = s3_helper.object_key_from_file_path(file_path_simple)
file_name_large = s3_helper.object_key_from_file_path(file_path_large)
bucket_objects = [file_name_simple, file_name_large]
with reporter.step("Bucket must be empty"):
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with reporter.step("Put objects into bucket"):
for file_path in (file_path_simple, file_path_large):
s3_client.put_object(bucket, file_path)
with reporter.step("Copy one object into the same bucket"):
copy_obj_path = s3_client.copy_object(bucket, file_name_simple)
bucket_objects.append(copy_obj_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_objects)
with reporter.step("Check copied object has the same content"):
got_copied_file = s3_client.get_object(bucket, copy_obj_path)
assert get_file_hash(file_path_simple) == get_file_hash(got_copied_file), "Hashes must be the same"
with reporter.step("Delete one object from bucket"):
s3_client.delete_object(bucket, file_name_simple)
bucket_objects.remove(file_name_simple)
s3_helper.check_objects_in_bucket(
s3_client,
bucket,
expected_objects=bucket_objects,
unexpected_objects=[file_name_simple],
)
@allure.title("Copy object to another bucket (s3_client={s3_client})")
def test_s3_copy_to_another_bucket(
self,
s3_client: S3ClientWrapper,
two_buckets: tuple[str, str],
complex_object_size: ObjectSize,
simple_object_size: ObjectSize,
):
"""
Test object can be copied to another bucket.
# TODO: delete after test_s3_copy_object is merged
"""
file_path_simple = generate_file(simple_object_size.value)
file_path_large = generate_file(complex_object_size.value)
file_name_simple = s3_helper.object_key_from_file_path(file_path_simple)
file_name_large = s3_helper.object_key_from_file_path(file_path_large)
bucket_1_objects = [file_name_simple, file_name_large]
bucket_1, bucket_2 = two_buckets
with reporter.step("Buckets must be empty"):
for bucket in (bucket_1, bucket_2):
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with reporter.step("Put objects into one bucket"):
for file_path in (file_path_simple, file_path_large):
s3_client.put_object(bucket_1, file_path)
with reporter.step("Copy object from first bucket into second"):
copy_obj_path_b2 = s3_client.copy_object(bucket_1, file_name_large, bucket=bucket_2)
s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=bucket_1_objects)
s3_helper.check_objects_in_bucket(s3_client, bucket_2, expected_objects=[copy_obj_path_b2])
with reporter.step("Check copied object has the same content"):
got_copied_file_b2 = s3_client.get_object(bucket_2, copy_obj_path_b2)
assert get_file_hash(file_path_large) == get_file_hash(got_copied_file_b2), "Hashes must be the same"
with reporter.step("Delete one object from first bucket"):
s3_client.delete_object(bucket_1, file_name_simple)
bucket_1_objects.remove(file_name_simple)
s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=bucket_1_objects)
s3_helper.check_objects_in_bucket(s3_client, bucket_2, expected_objects=[copy_obj_path_b2])
with reporter.step("Delete one object from second bucket and check it is empty"):
s3_client.delete_object(bucket_2, copy_obj_path_b2)
s3_helper.check_objects_in_bucket(s3_client, bucket_2, expected_objects=[])
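Under the hood both copy tests reduce to a single CopyObject call; a minimal boto3 sketch (bucket and key names are placeholders):

import boto3

s3 = boto3.client("s3")
# Server-side copy: no object data passes through the client.
s3.copy_object(
    Bucket="destination-bucket",
    Key="copied-key",
    CopySource={"Bucket": "source-bucket", "Key": "original-key"},
)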
def check_object_attributes(self, s3_client: S3ClientWrapper, bucket: str, object_key: str, parts_count: int):
if not isinstance(s3_client, AwsCliClient):
logger.warning("Attributes check is not supported for boto3 implementation")
return
with reporter.step("Check object's attributes"):
obj_parts = s3_client.get_object_attributes(bucket, object_key, ["ObjectParts"], full_output=False)
assert obj_parts.get("TotalPartsCount") == parts_count, f"Expected TotalPartsCount is {parts_count}"
assert len(obj_parts.get("Parts")) == parts_count, f"Expected Parts cunt is {parts_count}"
with reporter.step("Check object's attribute max-parts"):
max_parts = 2
obj_parts = s3_client.get_object_attributes(
bucket,
object_key,
["ObjectParts"],
max_parts=max_parts,
full_output=False,
)
assert obj_parts.get("TotalPartsCount") == parts_count, f"Expected TotalPartsCount is {parts_count}"
assert obj_parts.get("MaxParts") == max_parts, f"Expected MaxParts is {parts_count}"
assert len(obj_parts.get("Parts")) == max_parts, f"Expected Parts count is {parts_count}"
with reporter.step("Check object's attribute part-number-marker"):
part_number_marker = 3
obj_parts = s3_client.get_object_attributes(
bucket,
object_key,
["ObjectParts"],
part_number=part_number_marker,
full_output=False,
)
assert obj_parts.get("TotalPartsCount") == parts_count, f"Expected TotalPartsCount is {parts_count}"
assert (
obj_parts.get("PartNumberMarker") == part_number_marker
), f"Expected PartNumberMarker is {part_number_marker}"
assert len(obj_parts.get("Parts")) == 1, f"Expected Parts count is {parts_count}"

View file

@ -10,23 +10,34 @@ from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content
@allure.title("[Module] Create bucket with object_lock_enabled_for_bucket")
@pytest.fixture(scope="module")
def bucket_w_lock(s3_client: S3ClientWrapper):
return s3_client.create_bucket(object_lock_enabled_for_bucket=True)
@allure.title("[Module] Create bucket without object_lock_enabled_for_bucket")
@pytest.fixture(scope="module")
def bucket_no_lock(s3_client: S3ClientWrapper):
return s3_client.create_bucket(object_lock_enabled_for_bucket=False)
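These module-scoped fixtures carry part of the commit's speed-up: the two buckets are created once per module and shared by every test in it, instead of being created per test. The same pattern in generic form, with a hypothetical fixture name:

import pytest

@pytest.fixture(scope="module")
def shared_bucket(s3_client):
    # Created once for all tests in this module; no per-test cleanup.
    return s3_client.create_bucket()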
@pytest.mark.s3_gate
@pytest.mark.s3_gate_locking
@pytest.mark.parametrize("version_id", [None, "second"])
class TestS3GateLocking:
@allure.title("Retention period and legal lock on object (version_id={version_id}, s3_client={s3_client})")
def test_s3_object_locking(
self, s3_client: S3ClientWrapper, bucket_w_lock: str, version_id: str, simple_object_size: ObjectSize
):
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
retention_period = 2
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
with reporter.step("Put several versions of object into bucket"): with reporter.step("Put several versions of object into bucket"):
s3_client.put_object(bucket, file_path) s3_client.put_object(bucket_w_lock, file_path)
file_name_1 = generate_file_with_content(simple_object_size.value, file_path=file_path) file_name_1 = generate_file_with_content(simple_object_size.value, file_path=file_path)
version_id_2 = s3_client.put_object(bucket, file_name_1) version_id_2 = s3_client.put_object(bucket_w_lock, file_name_1)
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
if version_id:
version_id = version_id_2
@ -36,45 +47,44 @@ class TestS3GateLocking:
"Mode": "COMPLIANCE", "Mode": "COMPLIANCE",
"RetainUntilDate": date_obj, "RetainUntilDate": date_obj,
} }
s3_client.put_object_retention(bucket, file_name, retention, version_id) s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id)
s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF") s3_helper.assert_object_lock_mode(s3_client, bucket_w_lock, file_name, "COMPLIANCE", date_obj, "OFF")
with reporter.step(f"Put legal hold to object {file_name}"): with reporter.step(f"Put legal hold to object {file_name}"):
s3_client.put_object_legal_hold(bucket, file_name, "ON", version_id) s3_client.put_object_legal_hold(bucket_w_lock, file_name, "ON", version_id)
s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "ON") s3_helper.assert_object_lock_mode(s3_client, bucket_w_lock, file_name, "COMPLIANCE", date_obj, "ON")
with reporter.step("Fail with deleting object with legal hold and retention period"): with reporter.step("Fail with deleting object with legal hold and retention period"):
if version_id: if version_id:
with pytest.raises(Exception): with pytest.raises(Exception):
# An error occurred (AccessDenied) when calling the DeleteObject operation (reached max retries: 0): Access Denied. # An error occurred (AccessDenied) when calling the DeleteObject operation (reached max retries: 0): Access Denied.
s3_client.delete_object(bucket, file_name, version_id) s3_client.delete_object(bucket_w_lock, file_name, version_id)
with reporter.step("Check retention period is no longer set on the uploaded object"): with reporter.step("Check retention period is no longer set on the uploaded object"):
time.sleep((retention_period + 1) * 60) time.sleep((retention_period + 1) * 60)
s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "ON") s3_helper.assert_object_lock_mode(s3_client, bucket_w_lock, file_name, "COMPLIANCE", date_obj, "ON")
with reporter.step("Fail with deleting object with legal hold and retention period"): with reporter.step("Fail with deleting object with legal hold and retention period"):
if version_id: if version_id:
with pytest.raises(Exception): with pytest.raises(Exception):
# An error occurred (AccessDenied) when calling the DeleteObject operation (reached max retries: 0): Access Denied. # An error occurred (AccessDenied) when calling the DeleteObject operation (reached max retries: 0): Access Denied.
s3_client.delete_object(bucket, file_name, version_id) s3_client.delete_object(bucket_w_lock, file_name, version_id)
else: else:
s3_client.delete_object(bucket, file_name, version_id) s3_client.delete_object(bucket_w_lock, file_name, version_id)
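The Retention payload the test builds is just a mode plus an absolute expiry timestamp; spelled out on its own (values are illustrative):

from datetime import datetime, timedelta, timezone

retention = {
    "Mode": "COMPLIANCE",  # cannot be shortened or lifted before RetainUntilDate
    "RetainUntilDate": datetime.now(timezone.utc) + timedelta(minutes=2),
}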
@allure.title("Impossible to change retention mode COMPLIANCE (version_id={version_id}, s3_client={s3_client})") @allure.title("Impossible to change retention mode COMPLIANCE (version_id={version_id}, s3_client={s3_client})")
def test_s3_mode_compliance(self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize): def test_s3_mode_compliance(
self, s3_client: S3ClientWrapper, bucket_w_lock: str, version_id: str, simple_object_size: ObjectSize
):
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
retention_period = 2
retention_period_1 = 1
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
with reporter.step("Put object into bucket"): with reporter.step("Put object into bucket"):
obj_version = s3_client.put_object(bucket, file_path) obj_version = s3_client.put_object(bucket_w_lock, file_path)
if version_id: if version_id:
version_id = obj_version version_id = obj_version
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
with reporter.step(f"Put retention period {retention_period}min to object {file_name}"): with reporter.step(f"Put retention period {retention_period}min to object {file_name}"):
date_obj = datetime.utcnow() + timedelta(minutes=retention_period) date_obj = datetime.utcnow() + timedelta(minutes=retention_period)
@ -82,8 +92,8 @@ class TestS3GateLocking:
"Mode": "COMPLIANCE", "Mode": "COMPLIANCE",
"RetainUntilDate": date_obj, "RetainUntilDate": date_obj,
} }
s3_client.put_object_retention(bucket, file_name, retention, version_id) s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id)
s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF") s3_helper.assert_object_lock_mode(s3_client, bucket_w_lock, file_name, "COMPLIANCE", date_obj, "OFF")
with reporter.step(f"Try to change retention period {retention_period_1}min to object {file_name}"): with reporter.step(f"Try to change retention period {retention_period_1}min to object {file_name}"):
date_obj = datetime.utcnow() + timedelta(minutes=retention_period_1) date_obj = datetime.utcnow() + timedelta(minutes=retention_period_1)
@ -92,23 +102,22 @@ class TestS3GateLocking:
"RetainUntilDate": date_obj, "RetainUntilDate": date_obj,
} }
with pytest.raises(Exception): with pytest.raises(Exception):
s3_client.put_object_retention(bucket, file_name, retention, version_id) s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id)
@allure.title("Change retention mode GOVERNANCE (version_id={version_id}, s3_client={s3_client})") @allure.title("Change retention mode GOVERNANCE (version_id={version_id}, s3_client={s3_client})")
def test_s3_mode_governance(self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize): def test_s3_mode_governance(
self, s3_client: S3ClientWrapper, bucket_w_lock: str, version_id: str, simple_object_size: ObjectSize
):
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
retention_period = 3
retention_period_1 = 2
retention_period_2 = 5
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
with reporter.step("Put object into bucket"): with reporter.step("Put object into bucket"):
obj_version = s3_client.put_object(bucket, file_path) obj_version = s3_client.put_object(bucket_w_lock, file_path)
if version_id: if version_id:
version_id = obj_version version_id = obj_version
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
with reporter.step(f"Put retention period {retention_period}min to object {file_name}"): with reporter.step(f"Put retention period {retention_period}min to object {file_name}"):
date_obj = datetime.utcnow() + timedelta(minutes=retention_period) date_obj = datetime.utcnow() + timedelta(minutes=retention_period)
@ -116,8 +125,8 @@ class TestS3GateLocking:
"Mode": "GOVERNANCE", "Mode": "GOVERNANCE",
"RetainUntilDate": date_obj, "RetainUntilDate": date_obj,
} }
s3_client.put_object_retention(bucket, file_name, retention, version_id) s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id)
s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF") s3_helper.assert_object_lock_mode(s3_client, bucket_w_lock, file_name, "GOVERNANCE", date_obj, "OFF")
with reporter.step(f"Try to change retention period {retention_period_1}min to object {file_name}"): with reporter.step(f"Try to change retention period {retention_period_1}min to object {file_name}"):
date_obj = datetime.utcnow() + timedelta(minutes=retention_period_1) date_obj = datetime.utcnow() + timedelta(minutes=retention_period_1)
@ -126,7 +135,7 @@ class TestS3GateLocking:
"RetainUntilDate": date_obj, "RetainUntilDate": date_obj,
} }
with pytest.raises(Exception): with pytest.raises(Exception):
s3_client.put_object_retention(bucket, file_name, retention, version_id) s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id)
with reporter.step(f"Try to change retention period {retention_period_1}min to object {file_name}"): with reporter.step(f"Try to change retention period {retention_period_1}min to object {file_name}"):
date_obj = datetime.utcnow() + timedelta(minutes=retention_period_1) date_obj = datetime.utcnow() + timedelta(minutes=retention_period_1)
@ -135,7 +144,7 @@ class TestS3GateLocking:
"RetainUntilDate": date_obj, "RetainUntilDate": date_obj,
} }
with pytest.raises(Exception): with pytest.raises(Exception):
s3_client.put_object_retention(bucket, file_name, retention, version_id) s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id)
with reporter.step(f"Put new retention period {retention_period_2}min to object {file_name}"): with reporter.step(f"Put new retention period {retention_period_2}min to object {file_name}"):
date_obj = datetime.utcnow() + timedelta(minutes=retention_period_2) date_obj = datetime.utcnow() + timedelta(minutes=retention_period_2)
@ -143,51 +152,48 @@ class TestS3GateLocking:
"Mode": "GOVERNANCE", "Mode": "GOVERNANCE",
"RetainUntilDate": date_obj, "RetainUntilDate": date_obj,
} }
s3_client.put_object_retention(bucket, file_name, retention, version_id, True) s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id, True)
s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF") s3_helper.assert_object_lock_mode(s3_client, bucket_w_lock, file_name, "GOVERNANCE", date_obj, "OFF")
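The final step succeeds because GOVERNANCE, unlike COMPLIANCE, may be overridden by a caller holding s3:BypassGovernanceRetention; with plain boto3 that is the BypassGovernanceRetention flag (placeholder names):

from datetime import datetime, timedelta, timezone

import boto3

s3 = boto3.client("s3")
s3.put_object_retention(
    Bucket="example-bucket",
    Key="example-key",
    Retention={
        "Mode": "GOVERNANCE",
        "RetainUntilDate": datetime.now(timezone.utc) + timedelta(days=1),
    },
    BypassGovernanceRetention=True,  # requires the s3:BypassGovernanceRetention permission
)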
@allure.title(
"[NEGATIVE] Lock object in bucket with disabled locking (version_id={version_id}, s3_client={s3_client})"
)
def test_s3_legal_hold(
self, s3_client: S3ClientWrapper, bucket_no_lock: str, version_id: str, simple_object_size: ObjectSize
):
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=False)
with reporter.step("Put object into bucket"): with reporter.step("Put object into bucket"):
obj_version = s3_client.put_object(bucket, file_path) obj_version = s3_client.put_object(bucket_no_lock, file_path)
if version_id: if version_id:
version_id = obj_version version_id = obj_version
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
with reporter.step(f"Put legal hold to object {file_name}"): with reporter.step(f"Put legal hold to object {file_name}"):
with pytest.raises(Exception): with pytest.raises(Exception):
s3_client.put_object_legal_hold(bucket, file_name, "ON", version_id) s3_client.put_object_legal_hold(bucket_no_lock, file_name, "ON", version_id)
@pytest.mark.s3_gate
class TestS3GateLockingBucket:
@allure.title("Bucket Lock (s3_client={s3_client})")
def test_s3_bucket_lock(self, s3_client: S3ClientWrapper, bucket_w_lock: str, simple_object_size: ObjectSize):
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
configuration = {"Rule": {"DefaultRetention": {"Mode": "COMPLIANCE", "Days": 1}}}
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
with reporter.step("PutObjectLockConfiguration with ObjectLockEnabled=False"): with reporter.step("PutObjectLockConfiguration with ObjectLockEnabled=False"):
s3_client.put_object_lock_configuration(bucket, configuration) s3_client.put_object_lock_configuration(bucket_w_lock, configuration)
with reporter.step("PutObjectLockConfiguration with ObjectLockEnabled=True"): with reporter.step("PutObjectLockConfiguration with ObjectLockEnabled=True"):
configuration["ObjectLockEnabled"] = "Enabled" configuration["ObjectLockEnabled"] = "Enabled"
s3_client.put_object_lock_configuration(bucket, configuration) s3_client.put_object_lock_configuration(bucket_w_lock, configuration)
with reporter.step("GetObjectLockConfiguration"): with reporter.step("GetObjectLockConfiguration"):
config = s3_client.get_object_lock_configuration(bucket) config = s3_client.get_object_lock_configuration(bucket_w_lock)
configuration["Rule"]["DefaultRetention"]["Years"] = 0 configuration["Rule"]["DefaultRetention"]["Years"] = 0
assert config == configuration, f"Configurations must be equal {configuration}" assert config == configuration, f"Configurations must be equal {configuration}"
with reporter.step("Put object into bucket"): with reporter.step("Put object into bucket"):
s3_client.put_object(bucket, file_path) s3_client.put_object(bucket_w_lock, file_path)
s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", None, "OFF", 1) s3_helper.assert_object_lock_mode(s3_client, bucket_w_lock, file_name, "COMPLIANCE", None, "OFF", 1)
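The configuration dictionary used here follows the PutObjectLockConfiguration shape: a bucket-wide default retention that applies to each new object version. A boto3 sketch of the same call (placeholder bucket):

import boto3

s3 = boto3.client("s3")
s3.put_object_lock_configuration(
    Bucket="example-bucket",
    ObjectLockConfiguration={
        "ObjectLockEnabled": "Enabled",
        "Rule": {"DefaultRetention": {"Mode": "COMPLIANCE", "Days": 1}},
    },
)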

View file

@ -46,15 +46,18 @@ class TestS3GateMultipart(ClusterTestBase):
for part_id, file_path in enumerate(part_files[1:], start=2):
etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
parts.append((part_id, etag))
got_parts = s3_client.list_parts(bucket, object_key, upload_id)
response = s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
with reporter.step("Check all parts are visible in bucket"):
got_parts = s3_client.list_parts(bucket, object_key, upload_id)
assert len(got_parts) == len(part_files), f"Expected {parts_count} parts, got\n{got_parts}"
with reporter.step("Complete multipart upload"):
response = s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
version_id = None
if versioning_status == VersioningStatus.ENABLED:
version_id = response["VersionId"]
assert len(got_parts) == len(part_files), f"Expected {parts_count} parts, got\n{got_parts}"
with reporter.step("Check upload list is empty"): with reporter.step("There should be no multipart uploads"):
uploads = s3_client.list_multipart_uploads(bucket) uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected there is no uploads in bucket {bucket}" assert not uploads, f"Expected there is no uploads in bucket {bucket}"
@ -62,12 +65,8 @@ class TestS3GateMultipart(ClusterTestBase):
got_object = s3_client.get_object(bucket, object_key)
assert get_file_hash(got_object) == get_file_hash(file_name_large)
with reporter.step("Delete the object"):
s3_client.delete_object(bucket, object_key, version_id)
with reporter.step("List objects in the bucket, expect to be empty"): with reporter.step("List objects in the bucket, expect to be empty"):
objects_list = s3_client.list_objects(bucket) objects_list = s3_client.list_objects(bucket)
@ -146,7 +145,9 @@ class TestS3GateMultipart(ClusterTestBase):
with reporter.step("Create multipart upload object"): with reporter.step("Create multipart upload object"):
upload_id = s3_client.create_multipart_upload(bucket, object_key) upload_id = s3_client.create_multipart_upload(bucket, object_key)
uploads = s3_client.list_multipart_uploads(bucket) uploads = s3_client.list_multipart_uploads(bucket)
assert uploads, f"Expected there are uploads in bucket {bucket}" assert len(uploads) == 1, f"Expected one upload in bucket {bucket}"
assert uploads[0].get("Key") == object_key, f"Expected correct key {object_key} in upload {uploads}"
assert uploads[0].get("UploadId") == upload_id, f"Expected correct UploadId {upload_id} in upload {uploads}"
with reporter.step("Upload parts to multipart upload"): with reporter.step("Upload parts to multipart upload"):
for part_id, obj_key in enumerate(objs, start=1): for part_id, obj_key in enumerate(objs, start=1):
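The multipart lifecycle asserted in these hunks, condensed into plain boto3 (placeholder names; a completed upload disappears from ListMultipartUploads):

import boto3

s3 = boto3.client("s3")
mpu = s3.create_multipart_upload(Bucket="example-bucket", Key="example-key")
upload_id = mpu["UploadId"]
part = s3.upload_part(
    Bucket="example-bucket",
    Key="example-key",
    UploadId=upload_id,
    PartNumber=1,
    Body=b"payload",
)
s3.complete_multipart_upload(
    Bucket="example-bucket",
    Key="example-key",
    UploadId=upload_id,
    MultipartUpload={"Parts": [{"PartNumber": 1, "ETag": part["ETag"]}]},
)
assert "Uploads" not in s3.list_multipart_uploads(Bucket="example-bucket")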

View file

@ -1,8 +1,8 @@
import os
import random
import string
import uuid
from datetime import datetime, timedelta
from random import choices, sample
from typing import Literal
import allure
@ -35,11 +35,44 @@ class TestS3GateObject:
public_key = wallet_utils.get_wallet_public_key(second_wallet, DEFAULT_WALLET_PASS)
yield public_key
@allure.title("Object API (obj_size={object_size}, s3_client={s3_client})")
@pytest.mark.parametrize(
"object_size",
["simple", "complex"],
indirect=True,
)
def test_s3_api_object(
self,
s3_client: S3ClientWrapper,
object_size: ObjectSize,
bucket: str,
):
"""
Test base S3 Object API (Put/Head/List) for simple and complex objects.
"""
with reporter.step("Prepare object to upload"):
test_file = generate_file(object_size.value)
file_name = s3_helper.object_key_from_file_path(test_file)
with reporter.step("Put object to bucket"):
s3_client.put_object(bucket, test_file)
with reporter.step("Head object from bucket"):
s3_client.head_object(bucket, file_name)
with reporter.step("Verify object in list"):
bucket_objects = s3_client.list_objects(bucket)
assert file_name in bucket_objects, f"Expected file {file_name} in objects list {bucket_objects}"
with reporter.step("Check object's attributes"):
for attrs in (["ETag"], ["ObjectSize", "StorageClass"]):
s3_client.get_object_attributes(bucket, file_name, attrs)
@allure.title("Copy object (s3_client={s3_client})") @allure.title("Copy object (s3_client={s3_client})")
def test_s3_copy_object( def test_s3_copy_object(
self, self,
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
two_buckets: tuple[str, str], two_buckets: list[str],
simple_object_size: ObjectSize, simple_object_size: ObjectSize,
): ):
file_path = generate_file(simple_object_size.value) file_path = generate_file(simple_object_size.value)
@ -48,9 +81,6 @@ class TestS3GateObject:
bucket_1, bucket_2 = two_buckets
objects_list = s3_client.list_objects(bucket_1)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with reporter.step("Put object into one bucket"): with reporter.step("Put object into one bucket"):
s3_client.put_object(bucket_1, file_path) s3_client.put_object(bucket_1, file_path)
@ -85,7 +115,7 @@ class TestS3GateObject:
def test_s3_copy_version_object(
self,
s3_client: S3ClientWrapper,
two_buckets: list[str],
simple_object_size: ObjectSize,
):
version_1_content = "Version 1"
@ -306,7 +336,7 @@ class TestS3GateObject:
assert obj_versions == version_ids, f"Object should have versions: {version_ids}"
with reporter.step("Delete two objects from bucket one by one"):
version_to_delete_b1 = random.sample([version_id_1, version_id_2, version_id_3, version_id_4], k=2)
version_to_save = list(set(version_ids) - set(version_to_delete_b1))
for ver in version_to_delete_b1:
s3_client.delete_object(bucket, obj_key, ver)
@ -746,6 +776,70 @@ class TestS3GateObject:
object_lock_retain_until_date=date_obj,
)
@allure.title("Delete object & delete objects (s3_client={s3_client})")
def test_s3_api_delete(
self,
s3_client: S3ClientWrapper,
two_buckets: list[str],
simple_object_size: ObjectSize,
complex_object_size: ObjectSize,
):
"""
Check the delete_object and delete_objects S3 API operations: from the first bucket objects are deleted
one by one, from the second bucket all at once.
"""
max_obj_count = 20
max_delete_objects = 17
put_objects = []
file_paths = []
obj_sizes = [simple_object_size, complex_object_size]
bucket_1, bucket_2 = two_buckets
with reporter.step(f"Generate {max_obj_count} files"):
for _ in range(max_obj_count):
test_file = generate_file(random.choice(obj_sizes).value)
file_paths.append(test_file)
put_objects.append(s3_helper.object_key_from_file_path(test_file.path))
for i, bucket in enumerate([bucket_1, bucket_2], 1):
with reporter.step(f"Put {max_obj_count} objects into bucket_{i}"):
for file_path in file_paths:
s3_client.put_object(bucket, file_path)
with reporter.step(f"Check all objects put in bucket_{i} successfully"):
bucket_objects = s3_client.list_objects_v2(bucket)
assert set(put_objects) == set(
bucket_objects
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
with reporter.step("Delete some objects from bucket_1 one by one"):
objects_to_delete_b1 = random.sample(put_objects, k=max_delete_objects)
for obj in objects_to_delete_b1:
s3_client.delete_object(bucket_1, obj)
with reporter.step("Check deleted objects are not visible in bucket bucket_1"):
bucket_objects = s3_client.list_objects_v2(bucket_1)
assert set(put_objects).difference(set(objects_to_delete_b1)) == set(
bucket_objects
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
for object_key in objects_to_delete_b1:
with pytest.raises(Exception, match="The specified key does not exist"):
s3_client.get_object(bucket_1, object_key)
with reporter.step("Delete some objects from bucket_2 at once"):
objects_to_delete_b2 = random.sample(put_objects, k=max_delete_objects)
s3_client.delete_objects(bucket_2, objects_to_delete_b2)
with reporter.step("Check deleted objects are not visible in bucket bucket_2"):
objects_list = s3_client.list_objects_v2(bucket_2)
assert set(put_objects).difference(set(objects_to_delete_b2)) == set(
objects_list
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
for object_key in objects_to_delete_b2:
with pytest.raises(Exception, match="The specified key does not exist"):
s3_client.get_object(bucket_2, object_key)
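One behavioural detail of the rewrite: random.sample draws distinct keys, while the random.choices used previously could repeat a key and skew the expected-set arithmetic. A quick illustration:

import random

keys = [f"key-{i}" for i in range(5)]
assert len(set(random.sample(keys, k=3))) == 3  # sample never repeats a key
# random.choices(keys, k=3) may repeat, e.g. ['key-1', 'key-1', 'key-4']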
@allure.title("Sync directory (sync_type={sync_type}, s3_client={s3_client})") @allure.title("Sync directory (sync_type={sync_type}, s3_client={s3_client})")
@pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True) @pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True)
@pytest.mark.parametrize("sync_type", ["sync", "cp"]) @pytest.mark.parametrize("sync_type", ["sync", "cp"])
@ -793,12 +887,10 @@ class TestS3GateObject:
temp_directory,
simple_object_size: ObjectSize,
):
path = "/".join(["".join(random.choices(string.ascii_letters, k=3)) for _ in range(10)])
file_path_1 = TestFile(os.path.join(temp_directory, path, "test_file_1"))
generate_file_with_content(simple_object_size.value, file_path=file_path_1)
file_name = s3_helper.object_key_from_file_path(file_path_1)
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with reporter.step("Put object"): with reporter.step("Put object"):
s3_client.put_object(bucket, file_path_1) s3_client.put_object(bucket, file_path_1)
@ -808,15 +900,10 @@ class TestS3GateObject:
def test_s3_delete_non_existing_object(self, s3_client: S3ClientWrapper, bucket: str):
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
objects_list = s3_client.list_objects_versions(bucket)
with reporter.step("Check that bucket is empty"):
assert not objects_list, f"Expected empty bucket, got {objects_list}"
obj_key = "fake_object_key" obj_key = "fake_object_key"
with reporter.step("Delete non-existing object"): with reporter.step("Delete non-existing object"):
delete_obj = s3_client.delete_object(bucket, obj_key) delete_obj = s3_client.delete_object(bucket, obj_key)
# there should be no objects or delete markers in the bucket
assert "DeleteMarker" not in delete_obj.keys(), "Delete markers should not be created" assert "DeleteMarker" not in delete_obj.keys(), "Delete markers should not be created"
objects_list = s3_client.list_objects_versions(bucket) objects_list = s3_client.list_objects_versions(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}" assert not objects_list, f"Expected empty bucket, got {objects_list}"
@ -824,9 +911,6 @@ class TestS3GateObject:
@allure.title("Delete the same object twice (s3_client={s3_client})") @allure.title("Delete the same object twice (s3_client={s3_client})")
def test_s3_delete_twice(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize): def test_s3_delete_twice(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED) s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
objects_list = s3_client.list_objects(bucket)
with reporter.step("Check that bucket is empty"):
assert not objects_list, f"Expected empty bucket, got {objects_list}"
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
@ -839,7 +923,7 @@ class TestS3GateObject:
versions = s3_client.list_objects_versions(bucket)
obj_versions = {version.get("VersionId") for version in versions if version.get("Key") == file_name}
assert obj_versions, f"Object versions were not found {versions}"
assert "DeleteMarker" in delete_object.keys(), "Delete markers not found"
with reporter.step("Delete the object from the bucket again"):

View file

@ -88,9 +88,8 @@ class TestS3GatePolicy(ClusterTestBase):
s3_client.create_bucket(location_constraint="UNEXISTING LOCATION CONSTRAINT")
@allure.title("Bucket policy (s3_client={s3_client})")
def test_s3_bucket_policy(self, s3_client: S3ClientWrapper, bucket: str):
with reporter.step("Create bucket"):
bucket = s3_client.create_bucket()
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with reporter.step("GetBucketPolicy"):
@ -112,11 +111,10 @@ class TestS3GatePolicy(ClusterTestBase):
}
],
}
s3_client.put_bucket_policy(bucket, custom_policy)
with reporter.step("GetBucketPolicy"):
returned_policy = json.loads(s3_client.get_bucket_policy(bucket))
assert returned_policy == custom_policy, "Wrong policy was received"
with reporter.step("Delete the policy"):
@ -127,9 +125,8 @@ class TestS3GatePolicy(ClusterTestBase):
s3_client.get_bucket_policy(bucket)
@allure.title("Bucket CORS (s3_client={s3_client})")
def test_s3_cors(self, s3_client: S3ClientWrapper, bucket: str):
with reporter.step("Create bucket without cors"):
bucket = s3_client.create_bucket()
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with pytest.raises(Exception):

View file

@ -1,10 +1,12 @@
import os
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content, get_file_content
@pytest.mark.s3_gate
@ -16,6 +18,69 @@ class TestS3GateVersioning:
with pytest.raises(Exception):
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.SUSPENDED)
@allure.title("Object versioning (s3_client={s3_client})")
def test_s3_api_versioning(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
"""
Test checks basic versioning functionality for S3 bucket.
"""
version_1_content = "Version 1"
version_2_content = "Version 2"
file_name_simple = generate_file_with_content(simple_object_size.value, content=version_1_content)
obj_key = os.path.basename(file_name_simple)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with reporter.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_name_simple)
generate_file_with_content(simple_object_size.value, file_path=file_name_simple, content=version_2_content)
version_id_2 = s3_client.put_object(bucket, file_name_simple)
with reporter.step("Check bucket shows all versions"):
versions = s3_client.list_objects_versions(bucket)
obj_versions = {version.get("VersionId") for version in versions if version.get("Key") == obj_key}
assert obj_versions == {
version_id_1,
version_id_2,
}, f"Expected object has versions: {version_id_1, version_id_2}"
with reporter.step("Show information about particular version"):
for version_id in (version_id_1, version_id_2):
response = s3_client.head_object(bucket, obj_key, version_id=version_id)
assert "LastModified" in response, "Expected LastModified field"
assert "ETag" in response, "Expected ETag field"
assert response.get("VersionId") == version_id, f"Expected VersionId is {version_id}"
assert response.get("ContentLength") != 0, "Expected ContentLength is not zero"
with reporter.step("Check object's attributes"):
for version_id in (version_id_1, version_id_2):
got_attrs = s3_client.get_object_attributes(bucket, obj_key, ["ETag"], version_id=version_id)
if got_attrs:
assert got_attrs.get("VersionId") == version_id, f"Expected VersionId is {version_id}"
with reporter.step("Delete object and check it was deleted"):
response = s3_client.delete_object(bucket, obj_key)
version_id_delete = response.get("VersionId")
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_client.head_object(bucket, obj_key)
with reporter.step("Get content for all versions and check it is correct"):
for version, content in (
(version_id_2, version_2_content),
(version_id_1, version_1_content),
):
file_name = s3_client.get_object(bucket, obj_key, version_id=version)
got_content = get_file_content(file_name)
assert got_content == content, f"Expected object content is\n{content}\nGot\n{got_content}"
with reporter.step("Restore previous object version"):
s3_client.delete_object(bucket, obj_key, version_id=version_id_delete)
file_name = s3_client.get_object(bucket, obj_key)
got_content = get_file_content(file_name)
assert (
got_content == version_2_content
), f"Expected object content is\n{version_2_content}\nGot\n{got_content}"
@allure.title("Enable and disable versioning without object_lock (s3_client={s3_client})") @allure.title("Enable and disable versioning without object_lock (s3_client={s3_client})")
def test_s3_version(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize): def test_s3_version(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize):
file_path = generate_file(simple_object_size.value) file_path = generate_file(simple_object_size.value)

View file

@ -23,7 +23,6 @@ def pytest_generate_tests(metafunc: pytest.Metafunc):
class TestLogs:
@allure.title("Check logs from frostfs-testcases with marks '{request.config.option.markexpr}'")
@pytest.mark.order(1000)
@pytest.mark.no_healthcheck
def test_logs_after_session(
self, temp_directory: str, cluster: Cluster, session_start_time: datetime, request: pytest.FixtureRequest
):