From b999d7cf9b6d04f881cac23971244695d248d420 Mon Sep 17 00:00:00 2001
From: "a.berezin"
Date: Tue, 26 Nov 2024 19:42:53 +0300
Subject: [PATCH] [#331] Remove basic-acl from failovers, http and shards tests

Signed-off-by: a.berezin
---
 pytest_tests/helpers/__init__.py               |   0
 pytest_tests/helpers/container_request.py      |  21 ++
 pytest_tests/helpers/policy_validation.py      |   3 +-
 .../failovers/test_failover_server.py          |  51 ++--
 .../failovers/test_failover_storage.py         |  26 +-
 .../test_frostfs_failover_network.py           |  45 ++--
 .../management/test_node_management.py         | 114 ++++-----
 .../services/http_gate/test_http_bearer.py     |  43 ++--
 .../services/http_gate/test_http_gate.py       | 239 ++++++------------
 .../services/http_gate/test_http_headers.py    |  62 ++---
 .../services/http_gate/test_http_object.py     |  63 ++---
 .../services/http_gate/test_http_streaming.py  |  36 +--
 .../http_gate/test_http_system_header.py       | 113 ++++-----
 pytest_tests/testsuites/shard/__init__.py      |   0
 .../testsuites/shard/test_control_shard.py     | 173 +++++------
 15 files changed, 422 insertions(+), 567 deletions(-)
 create mode 100644 pytest_tests/helpers/__init__.py
 create mode 100644 pytest_tests/testsuites/shard/__init__.py

diff --git a/pytest_tests/helpers/__init__.py b/pytest_tests/helpers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pytest_tests/helpers/container_request.py b/pytest_tests/helpers/container_request.py
index 5a42aee..f45ec20 100644
--- a/pytest_tests/helpers/container_request.py
+++ b/pytest_tests/helpers/container_request.py
@@ -68,6 +68,27 @@ class MultipleContainersRequest(list[ContainerRequest]):
 
 PUBLIC_WITH_POLICY = partial(ContainerRequest, ape_rules=APE_EVERYONE_ALLOW_ALL, short_name="Custom_policy_with_allow_all_ape_rule")
 
+
+# REPS
+REP_1_1_1 = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
+
+REP_2_1_2 = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
+REP_2_1_4 = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
+
+REP_2_2_2 = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
+REP_2_2_4 = "REP 2 IN X CBF 2 SELECT 4 FROM * AS X"
+#
+
+# Public means it has APE rule which allows everything for everyone
+REP_1_1_1_PUBLIC = PUBLIC_WITH_POLICY(REP_1_1_1, short_name="REP 1 CBF 1 SELECT 1 (public)")
+
+REP_2_1_2_PUBLIC = PUBLIC_WITH_POLICY(REP_2_1_2, short_name="REP 2 CBF 1 SELECT 2 (public)")
+REP_2_1_4_PUBLIC = PUBLIC_WITH_POLICY(REP_2_1_4, short_name="REP 2 CBF 1 SELECT 4 (public)")
+
+REP_2_2_2_PUBLIC = PUBLIC_WITH_POLICY(REP_2_2_2, short_name="REP 2 CBF 2 SELECT 2 (public)")
+REP_2_2_4_PUBLIC = PUBLIC_WITH_POLICY(REP_2_2_4, short_name="REP 2 CBF 2 SELECT 4 (public)")
+#
+
 EVERYONE_ALLOW_ALL = ContainerRequest(policy=DEFAULT_PLACEMENT_RULE, ape_rules=APE_EVERYONE_ALLOW_ALL, short_name="Everyone_Allow_All")
 OWNER_ALLOW_ALL = ContainerRequest(policy=DEFAULT_PLACEMENT_RULE, ape_rules=APE_OWNER_ALLOW_ALL, short_name="Owner_Allow_All")
 PRIVATE = ContainerRequest(policy=DEFAULT_PLACEMENT_RULE, ape_rules=[], short_name="Private_No_APE")
diff --git a/pytest_tests/helpers/policy_validation.py b/pytest_tests/helpers/policy_validation.py
index ab18ab5..e1c9fda 100644
--- a/pytest_tests/helpers/policy_validation.py
+++ b/pytest_tests/helpers/policy_validation.py
@@ -1,7 +1,8 @@
 from frostfs_testlib.shell.interfaces import Shell
 from frostfs_testlib.steps.cli.container import get_container
 from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetmapInfo
-from workspace.frostfs_testcases.pytest_tests.helpers.utility import placement_policy_from_container
+
+from ..helpers.utility import placement_policy_from_container
 
 
 def 
validate_object_policy(wallet: str, shell: Shell, placement_rule: str, cid: str, endpoint: str): diff --git a/pytest_tests/testsuites/failovers/test_failover_server.py b/pytest_tests/testsuites/failovers/test_failover_server.py index 47a95de..b9d6afa 100644 --- a/pytest_tests/testsuites/failovers/test_failover_server.py +++ b/pytest_tests/testsuites/failovers/test_failover_server.py @@ -6,11 +6,11 @@ import random import allure import pytest from frostfs_testlib import reporter -from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo, create_container from frostfs_testlib.steps.cli.object import get_object, get_object_nodes, put_object from frostfs_testlib.steps.node_management import check_node_in_map, check_node_not_in_map -from frostfs_testlib.storage.cluster import ClusterNode, StorageNode +from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode from frostfs_testlib.storage.controllers import ClusterStateController from frostfs_testlib.storage.dataclasses.object_size import ObjectSize from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo @@ -21,6 +21,9 @@ from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils.file_utils import get_file_hash from pytest import FixtureRequest +from ...helpers.container_creation import create_container_with_ape +from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest + logger = logging.getLogger("NeoLogger") @@ -41,18 +44,21 @@ class TestFailoverServer(ClusterTestBase): self, request: FixtureRequest, default_wallet: WalletInfo, + frostfs_cli: FrostfsCli, + cluster: Cluster, ) -> list[StorageContainer]: placement_rule = "REP 2 CBF 2 SELECT 2 FROM *" - + container_request = ContainerRequest(placement_rule, APE_EVERYONE_ALLOW_ALL) containers_count = request.param results = parallel( - [create_container for _ in range(containers_count)], + [create_container_with_ape for _ in range(containers_count)], + container_request=container_request, + frostfs_cli=frostfs_cli, wallet=default_wallet, shell=self.shell, + cluster=cluster, endpoint=self.cluster.default_rpc_endpoint, - rule=placement_rule, - basic_acl=PUBLIC_ACL, ) containers = [ @@ -63,17 +69,18 @@ class TestFailoverServer(ClusterTestBase): @allure.title("[Test] Create container") @pytest.fixture() - def container(self, default_wallet: WalletInfo) -> StorageContainer: + def container(self, default_wallet: WalletInfo, frostfs_cli: FrostfsCli) -> StorageContainer: select = len(self.cluster.cluster_nodes) placement_rule = f"REP {select - 1} CBF 1 SELECT {select} FROM *" - cont_id = create_container( + cid = create_container_with_ape( + ContainerRequest(placement_rule, APE_EVERYONE_ALLOW_ALL), + frostfs_cli, default_wallet, - shell=self.shell, - endpoint=self.cluster.default_rpc_endpoint, - rule=placement_rule, - basic_acl=PUBLIC_ACL, + self.shell, + self.cluster, + self.cluster.default_rpc_endpoint, ) - storage_cont_info = StorageContainerInfo(cont_id, default_wallet) + storage_cont_info = StorageContainerInfo(cid, default_wallet) return StorageContainer(storage_cont_info, self.shell, self.cluster) @allure.title("[Class] Create objects") @@ -127,7 +134,7 @@ class TestFailoverServer(ClusterTestBase): parallel(self._verify_object, storage_objects * len(nodes), node=itertools.cycle(nodes)) @allure.title("Full shutdown node") - 
@pytest.mark.parametrize("containers, storage_objects", [(5, 10)], indirect=True) + @pytest.mark.parametrize("containers, storage_objects", [(4, 5)], indirect=True) def test_complete_node_shutdown( self, storage_objects: list[StorageObjectInfo], @@ -221,17 +228,19 @@ class TestFailoverServer(ClusterTestBase): self, default_wallet: WalletInfo, cluster_state_controller: ClusterStateController, + frostfs_cli: FrostfsCli, simple_file: str, ): with reporter.step("Create container with full network map"): node_count = len(self.cluster.cluster_nodes) placement_rule = f"REP {node_count - 2} IN X CBF 2 SELECT {node_count} FROM * AS X" - cid = create_container( + cid = create_container_with_ape( + ContainerRequest(placement_rule, APE_EVERYONE_ALLOW_ALL), + frostfs_cli, default_wallet, self.shell, + self.cluster, self.cluster.default_rpc_endpoint, - rule=placement_rule, - basic_acl=PUBLIC_ACL, ) with reporter.step("Put object"): @@ -255,10 +264,4 @@ class TestFailoverServer(ClusterTestBase): get_object(default_wallet, cid, oid_2, self.shell, alive_endpoint_with_object) with reporter.step("Create container on alive node"): - create_container( - default_wallet, - self.shell, - alive_endpoint_with_object, - rule=placement_rule, - basic_acl=PUBLIC_ACL, - ) + create_container(default_wallet, self.shell, alive_endpoint_with_object, placement_rule) diff --git a/pytest_tests/testsuites/failovers/test_failover_storage.py b/pytest_tests/testsuites/failovers/test_failover_storage.py index f99333f..2d1a86b 100644 --- a/pytest_tests/testsuites/failovers/test_failover_storage.py +++ b/pytest_tests/testsuites/failovers/test_failover_storage.py @@ -7,7 +7,6 @@ import allure import pytest from frostfs_testlib import reporter from frostfs_testlib.resources.common import MORPH_BLOCK_TIME -from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo, create_container @@ -34,6 +33,7 @@ from frostfs_testlib.utils.failover_utils import wait_object_replication from frostfs_testlib.utils.file_keeper import FileKeeper from frostfs_testlib.utils.file_utils import generate_file, get_file_hash +from ...helpers.container_request import REP_2_2_2_PUBLIC, requires_container from ...resources.common import S3_POLICY_FILE_LOCATION logger = logging.getLogger("NeoLogger") @@ -54,32 +54,26 @@ class TestFailoverStorage(ClusterTestBase): @allure.title("Shutdown and start node (stop_mode={stop_mode})") @pytest.mark.parametrize("stop_mode", ["hard", "soft"]) @pytest.mark.failover_reboot + @requires_container(REP_2_2_2_PUBLIC) def test_lose_storage_node_host( self, default_wallet, stop_mode: str, + container: str, require_multiple_hosts, simple_object_size: ObjectSize, cluster: Cluster, cluster_state_controller: ClusterStateController, ): wallet = default_wallet - placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X" source_file_path = generate_file(simple_object_size.value) stopped_hosts_nodes = [] - with reporter.step(f"Create container and put object"): - cid = create_container( - wallet, - shell=self.shell, - endpoint=self.cluster.default_rpc_endpoint, - rule=placement_rule, - basic_acl=PUBLIC_ACL, - ) - oid = put_object_to_random_node(wallet, source_file_path, cid, shell=self.shell, cluster=self.cluster) + with reporter.step(f"Put object"): + oid = put_object_to_random_node(wallet, source_file_path, container, 
shell=self.shell, cluster=self.cluster) with reporter.step(f"Wait for replication and get nodes with object"): - nodes_with_object = wait_object_replication(cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes) + nodes_with_object = wait_object_replication(container, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes) with reporter.step(f"Stop 2 nodes with object and wait replication one by one"): for storage_node in random.sample(nodes_with_object, 2): @@ -89,7 +83,7 @@ class TestFailoverStorage(ClusterTestBase): cluster_state_controller.stop_node_host(cluster_node, stop_mode) replicated_nodes = wait_object_replication( - cid, + container, oid, 2, shell=self.shell, @@ -97,15 +91,15 @@ class TestFailoverStorage(ClusterTestBase): ) with reporter.step("Check object data is not corrupted"): - got_file_path = get_object(wallet, cid, oid, endpoint=replicated_nodes[0].get_rpc_endpoint(), shell=self.shell) + got_file_path = get_object(wallet, container, oid, endpoint=replicated_nodes[0].get_rpc_endpoint(), shell=self.shell) assert get_file_hash(source_file_path) == get_file_hash(got_file_path) with reporter.step("Return all hosts"): cluster_state_controller.start_stopped_hosts() with reporter.step("Check object data is not corrupted"): - replicated_nodes = wait_object_replication(cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes) - got_file_path = get_object(wallet, cid, oid, shell=self.shell, endpoint=replicated_nodes[0].get_rpc_endpoint()) + replicated_nodes = wait_object_replication(container, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes) + got_file_path = get_object(wallet, container, oid, shell=self.shell, endpoint=replicated_nodes[0].get_rpc_endpoint()) assert get_file_hash(source_file_path) == get_file_hash(got_file_path) @pytest.mark.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], indirect=True) diff --git a/pytest_tests/testsuites/failovers/test_frostfs_failover_network.py b/pytest_tests/testsuites/failovers/test_frostfs_failover_network.py index e0e978d..648ff4d 100644 --- a/pytest_tests/testsuites/failovers/test_frostfs_failover_network.py +++ b/pytest_tests/testsuites/failovers/test_frostfs_failover_network.py @@ -6,10 +6,8 @@ import allure import pytest from frostfs_testlib import reporter from frostfs_testlib.healthcheck.interfaces import Healthcheck -from frostfs_testlib.resources.wellknown_acl import EACL_PUBLIC_READ_WRITE, PUBLIC_ACL from frostfs_testlib.steps.cli.container import create_container from frostfs_testlib.steps.cli.object import get_object, get_object_nodes, neo_go_query_height, put_object, put_object_to_random_node -from frostfs_testlib.steps.storage_object import delete_objects from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers import ClusterStateController from frostfs_testlib.storage.dataclasses.object_size import ObjectSize @@ -20,6 +18,8 @@ from frostfs_testlib.testing.parallel import parallel from frostfs_testlib.utils.failover_utils import wait_object_replication from frostfs_testlib.utils.file_utils import generate_file, get_file_hash +from ...helpers.container_request import PUBLIC_WITH_POLICY, REP_2_2_2_PUBLIC, requires_container + logger = logging.getLogger("NeoLogger") STORAGE_NODE_COMMUNICATION_PORT = "8080" STORAGE_NODE_COMMUNICATION_PORT_TLS = "8082" @@ -59,23 +59,13 @@ class TestFailoverNetwork(ClusterTestBase): def storage_objects( self, simple_object_size: ObjectSize, + container: str, default_wallet: WalletInfo, ) -> list[StorageObjectInfo]: file_path 
= generate_file(simple_object_size.value) file_hash = get_file_hash(file_path) - with reporter.step("Create container"): - placement_rule = "REP 1 CBF 1" - cid = create_container( - wallet=default_wallet, - shell=self.shell, - endpoint=self.cluster.default_rpc_endpoint, - rule=placement_rule, - await_mode=True, - basic_acl=EACL_PUBLIC_READ_WRITE, - ) - storage_objects = [] with reporter.step("Put object"): @@ -83,12 +73,12 @@ class TestFailoverNetwork(ClusterTestBase): oid = put_object_to_random_node( wallet=default_wallet, path=file_path, - cid=cid, + cid=container, shell=self.shell, cluster=self.cluster, ) - storage_object = StorageObjectInfo(cid=cid, oid=oid) + storage_object = StorageObjectInfo(cid=container, oid=oid) storage_object.size = simple_object_size.value storage_object.wallet = default_wallet storage_object.file_path = file_path @@ -100,9 +90,11 @@ class TestFailoverNetwork(ClusterTestBase): return storage_objects @allure.title("Block Storage node traffic") + @requires_container(REP_2_2_2_PUBLIC) def test_block_storage_node_traffic( self, default_wallet: WalletInfo, + container: str, require_multiple_hosts, simple_object_size: ObjectSize, cluster_state_controller: ClusterStateController, @@ -111,21 +103,13 @@ class TestFailoverNetwork(ClusterTestBase): Block storage nodes traffic using iptables and wait for replication for objects. """ wallet = default_wallet - placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X" wakeup_node_timeout = 10 # timeout to let nodes detect that traffic has blocked nodes_to_block_count = 2 source_file_path = generate_file(simple_object_size.value) - cid = create_container( - wallet, - shell=self.shell, - endpoint=self.cluster.default_rpc_endpoint, - rule=placement_rule, - basic_acl=PUBLIC_ACL, - ) - oid = put_object_to_random_node(wallet, source_file_path, cid, shell=self.shell, cluster=self.cluster) + oid = put_object_to_random_node(wallet, source_file_path, container, self.shell, self.cluster) - nodes = wait_object_replication(cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes) + nodes = wait_object_replication(container, oid, 2, self.shell, self.cluster.storage_nodes) logger.info(f"Nodes are {nodes}") nodes_to_block = nodes @@ -147,7 +131,7 @@ class TestFailoverNetwork(ClusterTestBase): with reporter.step(f"Check object is not stored on node {node}"): new_nodes = wait_object_replication( - cid, + container, oid, 2, shell=self.shell, @@ -156,7 +140,7 @@ class TestFailoverNetwork(ClusterTestBase): assert node.storage_node not in new_nodes with reporter.step("Check object data is not corrupted"): - got_file_path = get_object(wallet, cid, oid, endpoint=new_nodes[0].get_rpc_endpoint(), shell=self.shell) + got_file_path = get_object(wallet, container, oid, endpoint=new_nodes[0].get_rpc_endpoint(), shell=self.shell) assert get_file_hash(source_file_path) == get_file_hash(got_file_path) with reporter.step(f"Unblock incoming traffic"): @@ -170,13 +154,14 @@ class TestFailoverNetwork(ClusterTestBase): sleep(wakeup_node_timeout) with reporter.step("Check object data is not corrupted"): - new_nodes = wait_object_replication(cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes) + new_nodes = wait_object_replication(container, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes) - got_file_path = get_object(wallet, cid, oid, shell=self.shell, endpoint=new_nodes[0].get_rpc_endpoint()) + got_file_path = get_object(wallet, container, oid, shell=self.shell, endpoint=new_nodes[0].get_rpc_endpoint()) assert 
get_file_hash(source_file_path) == get_file_hash(got_file_path) @pytest.mark.interfaces @allure.title("Block DATA interface node") + @requires_container(PUBLIC_WITH_POLICY("REP 1 CBF 1", short_name="REP 1 CBF 1")) def test_block_data_interface( self, cluster_state_controller: ClusterStateController, @@ -284,7 +269,7 @@ class TestFailoverNetwork(ClusterTestBase): ) with reporter.step(f"Get object nodes with object, expect true"): - input_file = get_object( + _ = get_object( wallet=default_wallet, cid=storage_object.cid, oid=storage_object.oid, diff --git a/pytest_tests/testsuites/management/test_node_management.py b/pytest_tests/testsuites/management/test_node_management.py index cae2f44..66fe9b3 100644 --- a/pytest_tests/testsuites/management/test_node_management.py +++ b/pytest_tests/testsuites/management/test_node_management.py @@ -1,7 +1,7 @@ import logging import random from time import sleep -from typing import Callable, Optional, Tuple +from typing import Callable, Optional import allure import pytest @@ -10,7 +10,6 @@ from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.cli.netmap_parser import NetmapParser from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND -from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container from frostfs_testlib.steps.cli.object import ( delete_object, @@ -44,6 +43,8 @@ from frostfs_testlib.utils import string_utils from frostfs_testlib.utils.failover_utils import wait_object_replication from frostfs_testlib.utils.file_utils import generate_file +from ...helpers.container_creation import create_container_with_ape +from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, REP_1_1_1_PUBLIC, ContainerRequest, requires_container from ...helpers.utility import wait_for_gc_pass_on_storage_nodes logger = logging.getLogger("NeoLogger") @@ -55,26 +56,16 @@ check_nodes: list[StorageNode] = [] @pytest.mark.order(10) class TestNodeManagement(ClusterTestBase): @pytest.fixture - @allure.title("Create container and pick the node with data") - def create_container_and_pick_node(self, default_wallet: WalletInfo, simple_object_size: ObjectSize) -> Tuple[str, StorageNode]: + @allure.title("Pick the node with data") + def node_with_data(self, container: str, default_wallet: WalletInfo, simple_object_size: ObjectSize) -> StorageNode: file_path = generate_file(simple_object_size.value) - placement_rule = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" - endpoint = self.cluster.default_rpc_endpoint + oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster) - cid = create_container( - default_wallet, - shell=self.shell, - endpoint=endpoint, - rule=placement_rule, - basic_acl=PUBLIC_ACL, - ) - oid = put_object_to_random_node(default_wallet, file_path, cid, self.shell, self.cluster) - - nodes = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes) + nodes = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes) assert len(nodes) == 1 node = nodes[0] - yield cid, node + yield node shards = node_shard_list(node) assert shards @@ -126,6 +117,7 @@ class TestNodeManagement(ClusterTestBase): self, default_wallet: WalletInfo, simple_object_size: ObjectSize, + frostfs_cli: FrostfsCli, return_nodes_after_test_run, ): """ @@ -147,20 +139,16 @@ class TestNodeManagement(ClusterTestBase): 
exclude_node_from_network_map(random_node, alive_node, shell=self.shell, cluster=self.cluster) delete_node_data(random_node) - cid = create_container( - wallet, - rule=placement_rule_3, - basic_acl=PUBLIC_ACL, - shell=self.shell, - endpoint=alive_node.get_rpc_endpoint(), - ) - oid = put_object( - wallet, - source_file_path, - cid, - shell=self.shell, - endpoint=alive_node.get_rpc_endpoint(), + cid = create_container_with_ape( + ContainerRequest(placement_rule_3, APE_EVERYONE_ALLOW_ALL), + frostfs_cli, + default_wallet, + self.shell, + self.cluster, + alive_node.get_rpc_endpoint(), ) + + oid = put_object(wallet, source_file_path, cid, self.shell, alive_node.get_rpc_endpoint()) wait_object_replication(cid, oid, 3, shell=self.shell, nodes=storage_nodes) self.return_nodes(alive_node) @@ -182,12 +170,13 @@ class TestNodeManagement(ClusterTestBase): wait_object_replication(cid, oid, 3, shell=self.shell, nodes=storage_nodes) with reporter.step("Check container could be created with new node"): - cid = create_container( - wallet, - rule=placement_rule_4, - basic_acl=PUBLIC_ACL, - shell=self.shell, - endpoint=alive_node.get_rpc_endpoint(), + cid = create_container_with_ape( + ContainerRequest(placement_rule_4, APE_EVERYONE_ALLOW_ALL), + frostfs_cli, + default_wallet, + self.shell, + self.cluster, + alive_node.get_rpc_endpoint(), ) oid = put_object( wallet, @@ -231,48 +220,53 @@ class TestNodeManagement(ClusterTestBase): @pytest.mark.skip(reason="Need to clarify scenario") @allure.title("Control Operations with storage nodes") + @requires_container(REP_1_1_1_PUBLIC) def test_shards( self, - default_wallet, - create_container_and_pick_node, + default_wallet: WalletInfo, + container: str, + node_with_data: StorageNode, simple_object_size: ObjectSize, ): wallet = default_wallet file_path = generate_file(simple_object_size.value) - cid, node = create_container_and_pick_node - original_oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster) + original_oid = put_object_to_random_node(wallet, file_path, container, self.shell, self.cluster) # for mode in ('read-only', 'degraded'): for mode in ("degraded",): - shards = node_shard_list(node) + shards = node_shard_list(node_with_data) assert shards for shard in shards: - node_shard_set_mode(node, shard, mode) + node_shard_set_mode(node_with_data, shard, mode) - shards = node_shard_list(node) + shards = node_shard_list(node_with_data) assert shards + # TODO: Add match for error with pytest.raises(RuntimeError): - put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster) + put_object_to_random_node(wallet, file_path, container, self.shell, self.cluster) + # TODO: Add match for error with pytest.raises(RuntimeError): - delete_object(wallet, cid, original_oid, self.shell, self.cluster.default_rpc_endpoint) + delete_object(wallet, container, original_oid, self.shell, self.cluster.default_rpc_endpoint) - get_object_from_random_node(wallet, cid, original_oid, self.shell, self.cluster) + get_object_from_random_node(wallet, container, original_oid, self.shell, self.cluster) for shard in shards: - node_shard_set_mode(node, shard, "read-write") + node_shard_set_mode(node_with_data, shard, "read-write") - shards = node_shard_list(node) + shards = node_shard_list(node_with_data) assert shards - oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster) - delete_object(wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint) + oid = put_object_to_random_node(wallet, file_path, container, self.shell, 
self.cluster) + delete_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint) @allure.title("Put object with stopped node") - def test_stop_node(self, default_wallet, return_nodes_after_test_run, simple_object_size: ObjectSize): + def test_stop_node( + self, default_wallet: WalletInfo, frostfs_cli: FrostfsCli, return_nodes_after_test_run, simple_object_size: ObjectSize + ): wallet = default_wallet placement_rule = "REP 3 IN X SELECT 4 FROM * AS X" source_file_path = generate_file(simple_object_size.value) @@ -280,16 +274,20 @@ class TestNodeManagement(ClusterTestBase): random_node = random.choice(storage_nodes[1:]) alive_node = random.choice([storage_node for storage_node in storage_nodes if storage_node.id != random_node.id]) - cid = create_container( - wallet, - rule=placement_rule, - basic_acl=PUBLIC_ACL, - shell=self.shell, - endpoint=random_node.get_rpc_endpoint(), - ) + with reporter.step("Create container from random node endpoint"): + cid = create_container_with_ape( + ContainerRequest(placement_rule, APE_EVERYONE_ALLOW_ALL), + frostfs_cli, + default_wallet, + self.shell, + self.cluster, + random_node.get_rpc_endpoint(), + ) + with reporter.step("Stop the random node"): check_nodes.append(random_node) random_node.stop_service() + with reporter.step("Try to put an object and expect success"): put_object( wallet, diff --git a/pytest_tests/testsuites/services/http_gate/test_http_bearer.py b/pytest_tests/testsuites/services/http_gate/test_http_bearer.py index 4e20022..930137e 100644 --- a/pytest_tests/testsuites/services/http_gate/test_http_bearer.py +++ b/pytest_tests/testsuites/services/http_gate/test_http_bearer.py @@ -4,9 +4,7 @@ import allure import pytest from frostfs_testlib import reporter from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL from frostfs_testlib.steps.acl import bearer_token_base64_from_file -from frostfs_testlib.steps.cli.container import create_container from frostfs_testlib.steps.http.http_gate import upload_via_http_gate_curl, verify_object_hash from frostfs_testlib.storage.cluster import Cluster from frostfs_testlib.storage.dataclasses import ape @@ -16,6 +14,7 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase from frostfs_testlib.utils.file_utils import generate_file from ....helpers.bearer_token import create_bearer_token +from ....helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest, requires_container logger = logging.getLogger("NeoLogger") @@ -24,55 +23,43 @@ logger = logging.getLogger("NeoLogger") @pytest.mark.http_put class Test_http_bearer(ClusterTestBase): PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X" + OWNER_ROLE = ape.Condition.by_role(ape.Role.OWNER) + CUSTOM_APE_RULE = ape.Rule(ape.Verb.DENY, ape.ObjectOperations.PUT, OWNER_ROLE) - @pytest.fixture(scope="class") - def user_container(self, frostfs_cli: FrostfsCli, default_wallet: WalletInfo, cluster: Cluster) -> str: - with reporter.step("Create container"): - cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, self.PLACEMENT_RULE, PUBLIC_ACL) - - with reporter.step("Deny PUT via APE rule to container"): - role_condition = ape.Condition.by_role(ape.Role.OWNER) - rule = ape.Rule(ape.Verb.DENY, ape.ObjectOperations.PUT, role_condition) - frostfs_cli.ape_manager.add( - cluster.default_rpc_endpoint, rule.chain_id, target_name=cid, target_type="container", rule=rule.as_string() - ) - - with reporter.step("Wait for one 
block"): - self.wait_for_blocks() - - return cid - - @pytest.fixture(scope="class") - def bearer_token(self, frostfs_cli: FrostfsCli, user_container: str, temp_directory: str, cluster: Cluster) -> str: + @pytest.fixture() + def bearer_token(self, frostfs_cli: FrostfsCli, container: str, temp_directory: str, cluster: Cluster) -> str: with reporter.step(f"Create bearer token for {ape.Role.OTHERS} with all operations allowed"): role_condition = ape.Condition.by_role(ape.Role.OTHERS) rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL, role_condition) - bearer = create_bearer_token(frostfs_cli, temp_directory, user_container, rule, cluster.default_rpc_endpoint) + bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, cluster.default_rpc_endpoint) return bearer_token_base64_from_file(bearer) @allure.title(f"[NEGATIVE] Put object without bearer token for {ape.Role.OTHERS}") - def test_unable_put_without_bearer_token(self, simple_object_size: ObjectSize, user_container: str): + def test_unable_put_without_bearer_token(self, simple_object_size: ObjectSize, container: str): upload_via_http_gate_curl( - cid=user_container, + cid=container, filepath=generate_file(simple_object_size.value), endpoint=self.cluster.default_http_gate_endpoint, error_pattern="access to object operation denied", ) @allure.title("Put object via HTTP using bearer token (object_size={object_size})") - def test_put_with_bearer_when_eacl_restrict( + @requires_container( + ContainerRequest(PLACEMENT_RULE, [APE_EVERYONE_ALLOW_ALL, CUSTOM_APE_RULE], short_name="custom with denied owner put") + ) + def test_put_with_bearer_when_ape_restrict( self, object_size: ObjectSize, default_wallet: WalletInfo, - user_container: str, + container: str, bearer_token: str, ): file_path = generate_file(object_size.value) with reporter.step(f"Put object with bearer token for {ape.Role.OTHERS}, then get and verify hashes"): headers = [f" -H 'Authorization: Bearer {bearer_token}'"] oid = upload_via_http_gate_curl( - cid=user_container, + cid=container, filepath=file_path, endpoint=self.cluster.default_http_gate_endpoint, headers=headers, @@ -81,7 +68,7 @@ class Test_http_bearer(ClusterTestBase): oid=oid, file_name=file_path, wallet=default_wallet, - cid=user_container, + cid=container, shell=self.shell, nodes=self.cluster.storage_nodes, request_node=self.cluster.cluster_nodes[0], diff --git a/pytest_tests/testsuites/services/http_gate/test_http_gate.py b/pytest_tests/testsuites/services/http_gate/test_http_gate.py index 0561f5a..af521be 100644 --- a/pytest_tests/testsuites/services/http_gate/test_http_gate.py +++ b/pytest_tests/testsuites/services/http_gate/test_http_gate.py @@ -1,8 +1,6 @@ import allure import pytest from frostfs_testlib import reporter -from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL -from frostfs_testlib.steps.cli.container import create_container from frostfs_testlib.steps.cli.object import put_object_to_random_node from frostfs_testlib.steps.epoch import get_epoch from frostfs_testlib.steps.http.http_gate import ( @@ -17,9 +15,11 @@ from frostfs_testlib.steps.http.http_gate import ( verify_object_hash, ) from frostfs_testlib.storage.dataclasses.object_size import ObjectSize +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.cluster_test_base import ClusterTestBase -from frostfs_testlib.utils.file_utils import generate_file, get_file_hash +from frostfs_testlib.utils.file_utils import TestFile, generate_file, get_file_hash +from 
....helpers.container_request import REP_1_1_1_PUBLIC, REP_2_2_2_PUBLIC, requires_container from ....helpers.utility import wait_for_gc_pass_on_storage_nodes OBJECT_NOT_FOUND_ERROR = "not found" @@ -35,65 +35,36 @@ OBJECT_NOT_FOUND_ERROR = "not found" @pytest.mark.sanity @pytest.mark.http_gate class TestHttpGate(ClusterTestBase): - PLACEMENT_RULE_1 = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" - PLACEMENT_RULE_2 = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X" - - @pytest.fixture(scope="class", autouse=True) - @allure.title("[Class/Autouse]: Prepare wallet and deposit") - def prepare_wallet(self, default_wallet): - TestHttpGate.wallet = default_wallet - - @allure.title("Put over gRPC, Get over HTTP") - def test_put_grpc_get_http(self, complex_object_size: ObjectSize, simple_object_size: ObjectSize): + @allure.title("Put over gRPC, Get over HTTP (object_size={object_size})") + @requires_container(REP_1_1_1_PUBLIC) + def test_put_grpc_get_http(self, default_wallet: WalletInfo, container: str, test_file: TestFile): """ Test that object can be put using gRPC interface and get using HTTP. Steps: - 1. Create simple and large objects. - 2. Put objects using gRPC (frostfs-cli). - 3. Download objects using HTTP gate (https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading). - 4. Get objects using gRPC (frostfs-cli). - 5. Compare hashes for got objects. + 1. Create object. + 2. Put object using gRPC (frostfs-cli). + 3. Download object using HTTP gate (https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading). + 4. Get object using gRPC (frostfs-cli). + 5. Compare hashes for got object. 6. Compare hashes for got and original objects. Expected result: Hashes must be the same. """ - cid = create_container( - self.wallet, - shell=self.shell, - endpoint=self.cluster.default_rpc_endpoint, - rule=self.PLACEMENT_RULE_1, - basic_acl=PUBLIC_ACL, - ) - file_path_simple = generate_file(simple_object_size.value) - file_path_large = generate_file(complex_object_size.value) - with reporter.step("Put objects using gRPC"): - oid_simple = put_object_to_random_node( - wallet=self.wallet, - path=file_path_simple, - cid=cid, - shell=self.shell, - cluster=self.cluster, - ) - oid_large = put_object_to_random_node( - wallet=self.wallet, - path=file_path_large, - cid=cid, - shell=self.shell, - cluster=self.cluster, - ) + with reporter.step("Put object using gRPC"): + object_id = put_object_to_random_node(default_wallet, test_file.path, container, self.shell, self.cluster) - for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)): + with reporter.step("Get object and check hash"): verify_object_hash( - oid=oid, - file_name=file_path, - wallet=self.wallet, - cid=cid, - shell=self.shell, - nodes=self.cluster.storage_nodes, - request_node=self.cluster.cluster_nodes[0], + object_id, + test_file.path, + default_wallet, + container, + self.shell, + self.cluster.storage_nodes, + self.cluster.cluster_nodes[0], ) @@ -108,9 +79,10 @@ class TestHttpGate(ClusterTestBase): class TestHttpPut(ClusterTestBase): @allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#uploading", name="uploading") @allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading", name="downloading") - @allure.title("Put over HTTP, Get over HTTP") + @allure.title("Put over HTTP, Get over HTTP (object_size={object_size})") @pytest.mark.smoke - def test_put_http_get_http(self, complex_object_size: ObjectSize, simple_object_size: ObjectSize): + @requires_container(REP_2_2_2_PUBLIC) + def 
test_put_http_get_http(self, container: str, default_wallet: WalletInfo, test_file: TestFile): """ Test that object can be put and get using HTTP interface. @@ -123,29 +95,19 @@ class TestHttpPut(ClusterTestBase): Expected result: Hashes must be the same. """ - cid = create_container( - self.wallet, - shell=self.shell, - endpoint=self.cluster.default_rpc_endpoint, - rule=self.PLACEMENT_RULE_2, - basic_acl=PUBLIC_ACL, - ) - file_path_simple = generate_file(simple_object_size.value) - file_path_large = generate_file(complex_object_size.value) - with reporter.step("Put objects using HTTP"): - oid_simple = upload_via_http_gate(cid=cid, path=file_path_simple, endpoint=self.cluster.default_http_gate_endpoint) - oid_large = upload_via_http_gate(cid=cid, path=file_path_large, endpoint=self.cluster.default_http_gate_endpoint) + with reporter.step("Put object using HTTP"): + object_id = upload_via_http_gate(container, test_file.path, self.cluster.default_http_gate_endpoint) - for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)): + with reporter.step("Get object and check hash"): verify_object_hash( - oid=oid, - file_name=file_path, - wallet=self.wallet, - cid=cid, - shell=self.shell, - nodes=self.cluster.storage_nodes, - request_node=self.cluster.cluster_nodes[0], + object_id, + test_file.path, + default_wallet, + container, + self.shell, + self.cluster.storage_nodes, + self.cluster.cluster_nodes[0], ) @allure.link( @@ -162,7 +124,8 @@ class TestHttpPut(ClusterTestBase): ], ids=["simple", "hyphen", "percent"], ) - def test_put_http_get_http_with_headers(self, attributes: dict, simple_object_size: ObjectSize, id: str): + @requires_container(REP_2_2_2_PUBLIC) + def test_put_http_get_http_with_headers(self, container: str, attributes: dict, simple_object_size: ObjectSize, id: str): """ Test that object can be downloaded using different attributes in HTTP header. @@ -175,46 +138,27 @@ class TestHttpPut(ClusterTestBase): Expected result: Hashes must be the same. 
""" - cid = create_container( - self.wallet, - shell=self.shell, - endpoint=self.cluster.default_rpc_endpoint, - rule=self.PLACEMENT_RULE_2, - basic_acl=PUBLIC_ACL, - ) file_path = generate_file(simple_object_size.value) with reporter.step("Put objects using HTTP with attribute"): headers = attr_into_header(attributes) - oid = upload_via_http_gate( - cid=cid, - path=file_path, - headers=headers, - endpoint=self.cluster.default_http_gate_endpoint, - ) + oid = upload_via_http_gate(container, file_path, self.cluster.default_http_gate_endpoint, headers) get_object_by_attr_and_verify_hashes( - oid=oid, - file_name=file_path, - cid=cid, - attrs=attributes, - node=self.cluster.cluster_nodes[0], + oid, + file_path, + container, + attributes, + self.cluster.cluster_nodes[0], ) @allure.title("Expiration-Epoch in HTTP header (epoch_gap={epoch_gap})") @pytest.mark.parametrize("epoch_gap", [0, 1]) - def test_expiration_epoch_in_http(self, simple_object_size: ObjectSize, epoch_gap: int): - endpoint = self.cluster.default_rpc_endpoint + @requires_container(REP_2_2_2_PUBLIC) + def test_expiration_epoch_in_http(self, container: str, simple_object_size: ObjectSize, epoch_gap: int): http_endpoint = self.cluster.default_http_gate_endpoint min_valid_epoch = get_epoch(self.shell, self.cluster) + epoch_gap - cid = create_container( - self.wallet, - shell=self.shell, - endpoint=endpoint, - rule=self.PLACEMENT_RULE_2, - basic_acl=PUBLIC_ACL, - ) file_path = generate_file(simple_object_size.value) oids_to_be_expired = [] oids_to_be_valid = [] @@ -225,7 +169,7 @@ class TestHttpPut(ClusterTestBase): with reporter.step("Put objects using HTTP with attribute Expiration-Epoch"): oid = upload_via_http_gate( - cid=cid, + cid=container, path=file_path, headers=headers, endpoint=http_endpoint, @@ -235,7 +179,7 @@ class TestHttpPut(ClusterTestBase): else: oids_to_be_expired.append(oid) with reporter.step("This object can be got"): - get_via_http_gate(cid=cid, oid=oid, node=self.cluster.cluster_nodes[0]) + get_via_http_gate(container, oid, self.cluster.cluster_nodes[0]) self.tick_epoch() @@ -245,24 +189,18 @@ class TestHttpPut(ClusterTestBase): for oid in oids_to_be_expired: with reporter.step(f"{oid} shall be expired and cannot be got"): try_to_get_object_and_expect_error( - cid=cid, + cid=container, oid=oid, node=self.cluster.cluster_nodes[0], error_pattern=OBJECT_NOT_FOUND_ERROR, ) for oid in oids_to_be_valid: with reporter.step(f"{oid} shall be valid and can be got"): - get_via_http_gate(cid=cid, oid=oid, node=self.cluster.cluster_nodes[0]) + get_via_http_gate(cid=container, oid=oid, node=self.cluster.cluster_nodes[0]) @allure.title("Zip in HTTP header") - def test_zip_in_http(self, complex_object_size: ObjectSize, simple_object_size: ObjectSize): - cid = create_container( - self.wallet, - shell=self.shell, - endpoint=self.cluster.default_rpc_endpoint, - rule=self.PLACEMENT_RULE_2, - basic_acl=PUBLIC_ACL, - ) + @requires_container(REP_2_2_2_PUBLIC) + def test_zip_in_http(self, container: str, complex_object_size: ObjectSize, simple_object_size: ObjectSize): file_path_simple = generate_file(simple_object_size.value) file_path_large = generate_file(complex_object_size.value) common_prefix = "my_files" @@ -271,45 +209,33 @@ class TestHttpPut(ClusterTestBase): headers2 = {"X-Attribute-FilePath": f"{common_prefix}/file2"} upload_via_http_gate( - cid=cid, + cid=container, path=file_path_simple, headers=headers1, endpoint=self.cluster.default_http_gate_endpoint, ) - upload_via_http_gate( - cid=cid, - path=file_path_large, - 
headers=headers2,
-            endpoint=self.cluster.default_http_gate_endpoint,
-        )
+        upload_via_http_gate(container, file_path_large, headers2, self.cluster.default_http_gate_endpoint)
 
-        dir_path = get_via_zip_http_gate(cid=cid, prefix=common_prefix, node=self.cluster.cluster_nodes[0])
+        dir_path = get_via_zip_http_gate(cid=container, prefix=common_prefix, node=self.cluster.cluster_nodes[0])
 
         with reporter.step("Verify hashes"):
             assert get_file_hash(f"{dir_path}/file1") == get_file_hash(file_path_simple)
             assert get_file_hash(f"{dir_path}/file2") == get_file_hash(file_path_large)
 
-    @pytest.mark.long
     @allure.title("Put over HTTP/Curl, Get over HTTP/Curl for large object")
-    def test_put_http_get_http_large_file(self, complex_object_size: ObjectSize):
+    @requires_container(REP_2_2_2_PUBLIC)
+    def test_put_http_get_http_large_file(self, default_wallet: WalletInfo, container: str, complex_object_size: ObjectSize):
         """
         This test checks upload and download using curl with 'large' object.
         Large is object with size up to 20Mb.
 
         """
-        cid = create_container(
-            self.wallet,
-            shell=self.shell,
-            endpoint=self.cluster.default_rpc_endpoint,
-            rule=self.PLACEMENT_RULE_2,
-            basic_acl=PUBLIC_ACL,
-        )
-
         file_path = generate_file(complex_object_size.value)
 
         with reporter.step("Put objects using HTTP"):
-            oid_gate = upload_via_http_gate(cid=cid, path=file_path, endpoint=self.cluster.default_http_gate_endpoint)
+            oid_gate = upload_via_http_gate(cid=container, path=file_path, endpoint=self.cluster.default_http_gate_endpoint)
             oid_curl = upload_via_http_gate_curl(
-                cid=cid,
+                cid=container,
                 filepath=file_path,
                 endpoint=self.cluster.default_http_gate_endpoint,
             )
@@ -317,8 +243,8 @@
         verify_object_hash(
             oid=oid_gate,
             file_name=file_path,
-            wallet=self.wallet,
-            cid=cid,
+            wallet=default_wallet,
+            cid=container,
             shell=self.shell,
             nodes=self.cluster.storage_nodes,
             request_node=self.cluster.cluster_nodes[0],
@@ -326,45 +252,32 @@
         verify_object_hash(
             oid=oid_curl,
             file_name=file_path,
-            wallet=self.wallet,
-            cid=cid,
+            wallet=default_wallet,
+            cid=container,
             shell=self.shell,
             nodes=self.cluster.storage_nodes,
             request_node=self.cluster.cluster_nodes[0],
             object_getter=get_via_http_curl,
         )
 
-    @allure.title("Put/Get over HTTP using Curl utility")
-    def test_put_http_get_http_curl(self, complex_object_size: ObjectSize, simple_object_size: ObjectSize):
+    @allure.title("Put/Get over HTTP using Curl utility (object_size={object_size})")
+    @requires_container(REP_2_2_2_PUBLIC)
+    def test_put_http_get_http_curl(self, default_wallet: WalletInfo, container: str, test_file: TestFile):
         """
         Test checks upload and download over HTTP using curl utility.
 
""" - cid = create_container( - self.wallet, - shell=self.shell, - endpoint=self.cluster.default_rpc_endpoint, - rule=self.PLACEMENT_RULE_2, - basic_acl=PUBLIC_ACL, - ) - file_path_simple = generate_file(simple_object_size.value) - file_path_large = generate_file(complex_object_size.value) - with reporter.step("Put objects using curl utility"): - oid_simple = upload_via_http_gate_curl(cid=cid, filepath=file_path_simple, endpoint=self.cluster.default_http_gate_endpoint) - oid_large = upload_via_http_gate_curl( - cid=cid, - filepath=file_path_large, - endpoint=self.cluster.default_http_gate_endpoint, - ) + with reporter.step("Put object using curl utility"): + object_id = upload_via_http_gate_curl(container, test_file.path, self.cluster.default_http_gate_endpoint) - for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)): + with reporter.step("Get object and check hash"): verify_object_hash( - oid=oid, - file_name=file_path, - wallet=self.wallet, - cid=cid, - shell=self.shell, - nodes=self.cluster.storage_nodes, - request_node=self.cluster.cluster_nodes[0], - object_getter=get_via_http_curl, + object_id, + test_file.path, + default_wallet, + container, + self.shell, + self.cluster.storage_nodes, + self.cluster.cluster_nodes[0], + get_via_http_curl, ) diff --git a/pytest_tests/testsuites/services/http_gate/test_http_headers.py b/pytest_tests/testsuites/services/http_gate/test_http_headers.py index 4681047..95db861 100644 --- a/pytest_tests/testsuites/services/http_gate/test_http_headers.py +++ b/pytest_tests/testsuites/services/http_gate/test_http_headers.py @@ -4,13 +4,7 @@ import os import allure import pytest from frostfs_testlib import reporter -from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL -from frostfs_testlib.steps.cli.container import ( - create_container, - delete_container, - list_containers, - wait_for_container_deletion, -) +from frostfs_testlib.steps.cli.container import delete_container, list_containers, wait_for_container_deletion from frostfs_testlib.steps.cli.object import delete_object from frostfs_testlib.steps.http.http_gate import ( attr_into_str_header_curl, @@ -21,9 +15,12 @@ from frostfs_testlib.steps.http.http_gate import ( ) from frostfs_testlib.storage.dataclasses.object_size import ObjectSize from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.cluster_test_base import ClusterTestBase from frostfs_testlib.utils.file_utils import generate_file +from ....helpers.container_request import REP_2_1_4_PUBLIC, requires_container + OBJECT_ALREADY_REMOVED_ERROR = "object already removed" logger = logging.getLogger("NeoLogger") @@ -31,7 +28,6 @@ logger = logging.getLogger("NeoLogger") @pytest.mark.http_gate @pytest.mark.http_put class Test_http_headers(ClusterTestBase): - PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" obj1_keys = ["Writer", "Chapter1", "Chapter2"] obj2_keys = ["Writer", "Ch@pter1", "chapter2"] values = ["Leo Tolstoy", "peace", "w@r"] @@ -40,34 +36,23 @@ class Test_http_headers(ClusterTestBase): {obj2_keys[0]: values[0], obj2_keys[1]: values[1], obj2_keys[2]: values[2]}, ] - @pytest.fixture(scope="class", autouse=True) - @allure.title("[Class/Autouse]: Prepare wallet and deposit") - def prepare_wallet(self, default_wallet): - Test_http_headers.wallet = default_wallet - - def storage_objects_with_attributes(self, object_size: ObjectSize) -> list[StorageObjectInfo]: + 
@pytest.fixture + def storage_objects_with_attributes(self, container: str, wallet: WalletInfo, object_size: ObjectSize) -> list[StorageObjectInfo]: # TODO: Deal with http tests if object_size.value > 1000: pytest.skip("Complex objects for HTTP temporarly disabled for v0.37") storage_objects = [] - wallet = self.wallet - cid = create_container( - wallet=self.wallet, - shell=self.shell, - endpoint=self.cluster.default_rpc_endpoint, - rule=self.PLACEMENT_RULE, - basic_acl=PUBLIC_ACL, - ) + file_path = generate_file(object_size.value) for attributes in self.OBJECT_ATTRIBUTES: storage_object_id = upload_via_http_gate_curl( - cid=cid, + cid=container, filepath=file_path, endpoint=self.cluster.default_http_gate_endpoint, headers=attr_into_str_header_curl(attributes), ) - storage_object = StorageObjectInfo(cid, storage_object_id) + storage_object = StorageObjectInfo(container, storage_object_id) storage_object.size = os.path.getsize(file_path) storage_object.wallet = wallet storage_object.file_path = file_path @@ -75,9 +60,10 @@ class Test_http_headers(ClusterTestBase): storage_objects.append(storage_object) - yield storage_objects + return storage_objects - @allure.title("Get object1 by attribute") + @allure.title("Get object1 by attribute (object_size={object_size})") + @requires_container(REP_2_1_4_PUBLIC) def test_object1_can_be_get_by_attr(self, storage_objects_with_attributes: list[StorageObjectInfo]): """ Test to get object#1 by attribute and comapre hashes @@ -99,8 +85,9 @@ class Test_http_headers(ClusterTestBase): node=self.cluster.cluster_nodes[0], ) - @allure.title("Get object2 with different attributes, then delete object2 and get object1") - def test_object2_can_be_get_by_attr(self, storage_objects_with_attributes: list[StorageObjectInfo]): + @allure.title("Get object2 with different attributes, then delete object2 and get object1 (object_size={object_size})") + @requires_container(REP_2_1_4_PUBLIC) + def test_object2_can_be_get_by_attr(self, default_wallet: WalletInfo, storage_objects_with_attributes: list[StorageObjectInfo]): """ Test to get object2 with different attributes, then delete object2 and get object1 using 1st attribute. 
Note: obj1 and obj2 have the same attribute#1, and when obj2 is deleted you can get obj1 by 1st attribute @@ -131,7 +118,7 @@ class Test_http_headers(ClusterTestBase): ) with reporter.step("Delete object#2 and verify is the container deleted"): delete_object( - wallet=self.wallet, + wallet=default_wallet, cid=storage_object_2.cid, oid=storage_object_2.oid, shell=self.shell, @@ -145,9 +132,7 @@ class Test_http_headers(ClusterTestBase): ) storage_objects_with_attributes.remove(storage_object_2) - with reporter.step( - f'Download object#1 with attributes [Writer={storage_object_1.attributes["Writer"]}] and compare hashes' - ): + with reporter.step(f'Download object#1 with attributes [Writer={storage_object_1.attributes["Writer"]}] and compare hashes'): key_value_pair = {"Writer": storage_object_1.attributes["Writer"]} get_object_by_attr_and_verify_hashes( oid=storage_object_1.oid, @@ -157,8 +142,9 @@ class Test_http_headers(ClusterTestBase): node=self.cluster.cluster_nodes[0], ) - @allure.title("[NEGATIVE] Put object and get right after container is deleted") - def test_negative_put_and_get_object3(self, storage_objects_with_attributes: list[StorageObjectInfo]): + @allure.title("[NEGATIVE] Put object and get right after container is deleted (object_size={object_size})") + @requires_container(REP_2_1_4_PUBLIC) + def test_negative_put_and_get_object3(self, default_wallet: WalletInfo, storage_objects_with_attributes: list[StorageObjectInfo]): """ Test to attempt to put object and try to download it right after the container has been deleted @@ -188,7 +174,7 @@ class Test_http_headers(ClusterTestBase): ) with reporter.step("Delete container and verify container deletion"): delete_container( - wallet=self.wallet, + wallet=default_wallet, cid=storage_object_1.cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, @@ -196,14 +182,12 @@ class Test_http_headers(ClusterTestBase): ) self.tick_epoch() wait_for_container_deletion( - self.wallet, + default_wallet, storage_object_1.cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, ) - assert storage_object_1.cid not in list_containers( - self.wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint - ) + assert storage_object_1.cid not in list_containers(default_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint) with reporter.step("[Negative] Try to download (wget) object via wget with attributes [peace=peace]"): request = f"/get/{storage_object_1.cid}/peace/peace" error_pattern = "404 Not Found" diff --git a/pytest_tests/testsuites/services/http_gate/test_http_object.py b/pytest_tests/testsuites/services/http_gate/test_http_object.py index 0653c6e..8184e11 100644 --- a/pytest_tests/testsuites/services/http_gate/test_http_object.py +++ b/pytest_tests/testsuites/services/http_gate/test_http_object.py @@ -3,9 +3,7 @@ import logging import allure import pytest from frostfs_testlib import reporter -from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL from frostfs_testlib.s3 import AwsCliClient, S3ClientWrapper -from frostfs_testlib.steps.cli.container import create_container from frostfs_testlib.steps.cli.object import put_object_to_random_node from frostfs_testlib.steps.http.http_gate import ( assert_hashes_are_equal, @@ -15,9 +13,11 @@ from frostfs_testlib.steps.http.http_gate import ( verify_object_hash, ) from frostfs_testlib.steps.s3 import s3_helper -from frostfs_testlib.storage.dataclasses.object_size import ObjectSize +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo 
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase -from frostfs_testlib.utils.file_utils import generate_file +from frostfs_testlib.utils.file_utils import TestFile + +from ....helpers.container_request import REP_2_1_4_PUBLIC, requires_container logger = logging.getLogger("NeoLogger") @@ -26,15 +26,9 @@ logger = logging.getLogger("NeoLogger") @pytest.mark.sanity @pytest.mark.http_gate class Test_http_object(ClusterTestBase): - PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" - - @pytest.fixture(scope="class", autouse=True) - @allure.title("[Class/Autouse]: Prepare wallet and deposit") - def prepare_wallet(self, default_wallet): - Test_http_object.wallet = default_wallet - @allure.title("Put over gRPC, Get over HTTP with attributes (obj_size={object_size})") - def test_object_put_get_attributes(self, object_size: ObjectSize): + @requires_container(REP_2_1_4_PUBLIC) + def test_object_put_get_attributes(self, default_wallet: WalletInfo, container: str, test_file: TestFile): """ Test that object can be put using gRPC interface and got using HTTP. @@ -53,18 +47,6 @@ class Test_http_object(ClusterTestBase): Hashes must be the same. """ - with reporter.step("Create public container"): - cid = create_container( - self.wallet, - shell=self.shell, - endpoint=self.cluster.default_rpc_endpoint, - rule=self.PLACEMENT_RULE, - basic_acl=PUBLIC_ACL, - ) - - # Generate file - file_path = generate_file(object_size.value) - # List of Key=Value attributes obj_key1 = "chapter1" obj_value1 = "peace" @@ -77,9 +59,9 @@ class Test_http_object(ClusterTestBase): with reporter.step("Put objects using gRPC [--attributes chapter1=peace,chapter2=war]"): oid = put_object_to_random_node( - wallet=self.wallet, - path=file_path, - cid=cid, + wallet=default_wallet, + path=test_file.path, + cid=container, shell=self.shell, cluster=self.cluster, attributes=f"{key_value1},{key_value2}", @@ -87,9 +69,9 @@ class Test_http_object(ClusterTestBase): with reporter.step("Get object and verify hashes [ get/$CID/$OID ]"): verify_object_hash( oid=oid, - file_name=file_path, - wallet=self.wallet, - cid=cid, + file_name=test_file.path, + wallet=default_wallet, + cid=container, shell=self.shell, nodes=self.cluster.storage_nodes, request_node=self.cluster.cluster_nodes[0], @@ -97,10 +79,10 @@ class Test_http_object(ClusterTestBase): with reporter.step("[Negative] try to get object: [get/$CID/chapter1/peace]"): attrs = {obj_key1: obj_value1, obj_key2: obj_value2} - request = f"/get/{cid}/{obj_key1}/{obj_value1}" + request = f"/get/{container}/{obj_key1}/{obj_value1}" expected_err_msg = "Failed to get object via HTTP gate:" try_to_get_object_via_passed_request_and_expect_error( - cid=cid, + cid=container, oid=oid, node=self.cluster.cluster_nodes[0], error_pattern=expected_err_msg, @@ -111,15 +93,15 @@ class Test_http_object(ClusterTestBase): with reporter.step("Download the object with attribute [get_by_attribute/$CID/chapter1/peace]"): get_object_by_attr_and_verify_hashes( oid=oid, - file_name=file_path, - cid=cid, + file_name=test_file.path, + cid=container, attrs=attrs, node=self.cluster.cluster_nodes[0], ) with reporter.step("[Negative] try to get object: get_by_attribute/$CID/$OID"): - request = f"/get_by_attribute/{cid}/{oid}" + request = f"/get_by_attribute/{container}/{oid}" try_to_get_object_via_passed_request_and_expect_error( - cid=cid, + cid=container, oid=oid, node=self.cluster.cluster_nodes[0], error_pattern=expected_err_msg, @@ -128,7 +110,7 @@ class Test_http_object(ClusterTestBase): 
@allure.title("Put over s3, Get over HTTP with bucket name and key (object_size={object_size})") @pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True) - def test_object_put_get_bucketname_key(self, object_size: ObjectSize, s3_client: S3ClientWrapper): + def test_object_put_get_bucketname_key(self, test_file: TestFile, s3_client: S3ClientWrapper): """ Test that object can be put using s3-gateway interface and got via HTTP with bucket name and object key. @@ -143,10 +125,9 @@ class Test_http_object(ClusterTestBase): Hashes must be the same. """ - file_path = generate_file(object_size.value) - object_key = s3_helper.object_key_from_file_path(file_path) + object_key = s3_helper.object_key_from_file_path(test_file.path) bucket = s3_client.create_bucket(acl="public-read-write") - s3_client.put_object(bucket=bucket, filepath=file_path, key=object_key) + s3_client.put_object(bucket=bucket, filepath=test_file.path, key=object_key) obj_s3 = s3_client.get_object(bucket=bucket, key=object_key) request = f"/get/{bucket}/{object_key}" @@ -157,4 +138,4 @@ class Test_http_object(ClusterTestBase): request_path=request, ) with reporter.step("Verify hashes"): - assert_hashes_are_equal(file_path, obj_http, obj_s3) + assert_hashes_are_equal(test_file.path, obj_http, obj_s3) diff --git a/pytest_tests/testsuites/services/http_gate/test_http_streaming.py b/pytest_tests/testsuites/services/http_gate/test_http_streaming.py index f24d60d..027e1cb 100644 --- a/pytest_tests/testsuites/services/http_gate/test_http_streaming.py +++ b/pytest_tests/testsuites/services/http_gate/test_http_streaming.py @@ -3,28 +3,23 @@ import logging import allure import pytest from frostfs_testlib import reporter -from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL -from frostfs_testlib.steps.cli.container import create_container from frostfs_testlib.steps.http.http_gate import upload_via_http_gate_curl, verify_object_hash from frostfs_testlib.storage.dataclasses.object_size import ObjectSize +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.cluster_test_base import ClusterTestBase from frostfs_testlib.utils.file_utils import generate_file +from ....helpers.container_request import REP_2_1_4_PUBLIC, requires_container + logger = logging.getLogger("NeoLogger") @pytest.mark.http_gate @pytest.mark.http_put class Test_http_streaming(ClusterTestBase): - PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" - - @pytest.fixture(scope="class", autouse=True) - @allure.title("[Class/Autouse]: Prepare wallet and deposit") - def prepare_wallet(self, default_wallet): - Test_http_streaming.wallet = default_wallet - @allure.title("Put via pipe (streaming), Get over HTTP and verify hashes") - def test_object_can_be_put_get_by_streaming(self, complex_object_size: ObjectSize): + @requires_container(REP_2_1_4_PUBLIC) + def test_object_can_be_put_get_by_streaming(self, default_wallet: WalletInfo, container: str, complex_object_size: ObjectSize): """ Test that object can be put using gRPC interface and get using HTTP. @@ -37,27 +32,20 @@ class Test_http_streaming(ClusterTestBase): Expected result: Hashes must be the same. 
""" - with reporter.step("Create public container and verify container creation"): - cid = create_container( - self.wallet, - shell=self.shell, - endpoint=self.cluster.default_rpc_endpoint, - rule=self.PLACEMENT_RULE, - basic_acl=PUBLIC_ACL, - ) + with reporter.step("Allocate big object"): # Generate file file_path = generate_file(complex_object_size.value) - with reporter.step("Put objects using curl utility and Get object and verify hashes [ get/$CID/$OID ]"): - oid = upload_via_http_gate_curl( - cid=cid, filepath=file_path, endpoint=self.cluster.default_http_gate_endpoint - ) + with reporter.step("Put objects using curl utility"): + oid = upload_via_http_gate_curl(container, file_path, self.cluster.default_http_gate_endpoint) + + with reporter.step("Get object and verify hashes [ get/$CID/$OID ]"): verify_object_hash( oid=oid, file_name=file_path, - wallet=self.wallet, - cid=cid, + wallet=default_wallet, + cid=container, shell=self.shell, nodes=self.cluster.storage_nodes, request_node=self.cluster.cluster_nodes[0], diff --git a/pytest_tests/testsuites/services/http_gate/test_http_system_header.py b/pytest_tests/testsuites/services/http_gate/test_http_system_header.py index 50446a5..a4cbcb3 100644 --- a/pytest_tests/testsuites/services/http_gate/test_http_system_header.py +++ b/pytest_tests/testsuites/services/http_gate/test_http_system_header.py @@ -7,8 +7,6 @@ import allure import pytest from frostfs_testlib import reporter from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND -from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL -from frostfs_testlib.steps.cli.container import create_container from frostfs_testlib.steps.cli.object import get_netmap_netinfo, get_object_from_random_node, head_object from frostfs_testlib.steps.epoch import get_epoch, wait_for_epochs_align from frostfs_testlib.steps.http.http_gate import ( @@ -18,9 +16,12 @@ from frostfs_testlib.steps.http.http_gate import ( verify_object_hash, ) from frostfs_testlib.storage.dataclasses.object_size import ObjectSize +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.cluster_test_base import ClusterTestBase from frostfs_testlib.utils.file_utils import generate_file +from ....helpers.container_request import REP_2_1_2_PUBLIC, requires_container + logger = logging.getLogger("NeoLogger") EXPIRATION_TIMESTAMP_HEADER = "__SYSTEM__EXPIRATION_TIMESTAMP" @@ -38,29 +39,11 @@ SYSTEM_EXPIRATION_RFC3339 = "System-Expiration-RFC3339" @pytest.mark.http_gate @pytest.mark.http_put class Test_http_system_header(ClusterTestBase): - PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X" - - @pytest.fixture(scope="class", autouse=True) - @allure.title("[Class/Autouse]: Prepare wallet and deposit") - def prepare_wallet(self, default_wallet): - Test_http_system_header.wallet = default_wallet - - @pytest.fixture(scope="class") - @allure.title("Create container") - def user_container(self): - return create_container( - wallet=self.wallet, - shell=self.shell, - endpoint=self.cluster.default_rpc_endpoint, - rule=self.PLACEMENT_RULE, - basic_acl=PUBLIC_ACL, - ) - @pytest.fixture(scope="class") @allure.title("epoch_duration in seconds") - def epoch_duration(self) -> int: + def epoch_duration(self, default_wallet: WalletInfo) -> int: net_info = get_netmap_netinfo( - wallet=self.wallet, + wallet=default_wallet, endpoint=self.cluster.default_rpc_endpoint, shell=self.shell, ) @@ -83,7 +66,7 @@ class Test_http_system_header(ClusterTestBase): else: return 
str(calendar.timegm(future_datetime.timetuple())) - @allure.title("Check is (header_output) Key=Value exists and equal in passed (header_to_find)") + @allure.title("Check if (header_output) Key=Value exists and equal in passed (header_to_find)") def check_key_value_presented_header(self, header_output: dict, header_to_find: dict) -> bool: header_att = header_output["header"]["attributes"] for key_to_check, val_to_check in header_to_find.items(): @@ -112,25 +95,25 @@ class Test_http_system_header(ClusterTestBase): ), f"Only {EXPIRATION_EXPIRATION_RFC} can be displayed in header attributes" @allure.title("Put / get / verify object and return head command result to invoker") - def oid_header_info_for_object(self, file_path: str, attributes: dict, user_container: str): + def oid_header_info_for_object(self, default_wallet: WalletInfo, container: str, test_file: str, attributes: dict): oid = upload_via_http_gate_curl( - cid=user_container, - filepath=file_path, + cid=container, + filepath=test_file, endpoint=self.cluster.default_http_gate_endpoint, headers=attr_into_str_header_curl(attributes), ) verify_object_hash( oid=oid, - file_name=file_path, - wallet=self.wallet, - cid=user_container, + file_name=test_file, + wallet=default_wallet, + cid=container, shell=self.shell, nodes=self.cluster.storage_nodes, request_node=self.cluster.cluster_nodes[0], ) head = head_object( - wallet=self.wallet, - cid=user_container, + wallet=default_wallet, + cid=container, oid=oid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, @@ -138,12 +121,13 @@ class Test_http_system_header(ClusterTestBase): return oid, head @allure.title("[NEGATIVE] Put object with expired epoch") - def test_unable_put_expired_epoch(self, user_container: str, simple_object_size: ObjectSize): + @requires_container(REP_2_1_2_PUBLIC) + def test_unable_put_expired_epoch(self, container: str, simple_object_size: ObjectSize): headers = attr_into_str_header_curl({"System-Expiration-Epoch": str(get_epoch(self.shell, self.cluster) - 1)}) file_path = generate_file(simple_object_size.value) with reporter.step("Put object using HTTP with attribute Expiration-Epoch where epoch is expired"): upload_via_http_gate_curl( - cid=user_container, + cid=container, filepath=file_path, endpoint=self.cluster.default_http_gate_endpoint, headers=headers, @@ -151,12 +135,13 @@ class Test_http_system_header(ClusterTestBase): ) @allure.title("[NEGATIVE] Put object with negative System-Expiration-Duration") - def test_unable_put_negative_duration(self, user_container: str, simple_object_size: ObjectSize): + @requires_container(REP_2_1_2_PUBLIC) + def test_unable_put_negative_duration(self, container: str, simple_object_size: ObjectSize): headers = attr_into_str_header_curl({"System-Expiration-Duration": "-1h"}) file_path = generate_file(simple_object_size.value) with reporter.step("Put object using HTTP with attribute System-Expiration-Duration where duration is negative"): upload_via_http_gate_curl( - cid=user_container, + cid=container, filepath=file_path, endpoint=self.cluster.default_http_gate_endpoint, headers=headers, @@ -164,12 +149,13 @@ class Test_http_system_header(ClusterTestBase): ) @allure.title("[NEGATIVE] Put object with System-Expiration-Timestamp value in the past") - def test_unable_put_expired_timestamp(self, user_container: str, simple_object_size: ObjectSize): + @requires_container(REP_2_1_2_PUBLIC) + def test_unable_put_expired_timestamp(self, container: str, simple_object_size: ObjectSize): headers = 
attr_into_str_header_curl({"System-Expiration-Timestamp": "1635075727"}) file_path = generate_file(simple_object_size.value) with reporter.step("Put object using HTTP with attribute System-Expiration-Timestamp where duration is in the past"): upload_via_http_gate_curl( - cid=user_container, + cid=container, filepath=file_path, endpoint=self.cluster.default_http_gate_endpoint, headers=headers, @@ -177,11 +163,12 @@ class Test_http_system_header(ClusterTestBase): ) @allure.title("[NEGATIVE] Put object using HTTP with attribute System-Expiration-RFC3339 where duration is in the past") - def test_unable_put_expired_rfc(self, user_container: str, simple_object_size: ObjectSize): + @requires_container(REP_2_1_2_PUBLIC) + def test_unable_put_expired_rfc(self, container: str, simple_object_size: ObjectSize): headers = attr_into_str_header_curl({"System-Expiration-RFC3339": "2021-11-22T09:55:49Z"}) file_path = generate_file(simple_object_size.value) upload_via_http_gate_curl( - cid=user_container, + cid=container, filepath=file_path, endpoint=self.cluster.default_http_gate_endpoint, headers=headers, @@ -189,7 +176,10 @@ class Test_http_system_header(ClusterTestBase): ) @allure.title("Priority of attributes epoch>duration (obj_size={object_size})") - def test_http_attr_priority_epoch_duration(self, user_container: str, object_size: ObjectSize, epoch_duration: int): + @requires_container(REP_2_1_2_PUBLIC) + def test_http_attr_priority_epoch_duration( + self, default_wallet: WalletInfo, container: str, object_size: ObjectSize, epoch_duration: int + ): self.tick_epoch() epoch_count = 1 expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count @@ -201,7 +191,7 @@ class Test_http_system_header(ClusterTestBase): with reporter.step( f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr" ): - oid, head_info = self.oid_header_info_for_object(file_path=file_path, attributes=attributes, user_container=user_container) + oid, head_info = self.oid_header_info_for_object(default_wallet, file_path, attributes, container) self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch) with reporter.step("Check that object becomes unavailable when epoch is expired"): for _ in range(0, epoch_count + 1): @@ -213,17 +203,20 @@ class Test_http_system_header(ClusterTestBase): with reporter.step("Check object deleted because it expires-on epoch"): wait_for_epochs_align(self.shell, self.cluster) try_to_get_object_and_expect_error( - cid=user_container, + cid=container, oid=oid, node=self.cluster.cluster_nodes[0], error_pattern="404 Not Found", ) # check that object is not available via grpc with pytest.raises(Exception, match=OBJECT_NOT_FOUND): - get_object_from_random_node(self.wallet, user_container, oid, self.shell, self.cluster) + get_object_from_random_node(default_wallet, container, oid, self.shell, self.cluster) @allure.title("Priority of attributes duration>timestamp (obj_size={object_size})") - def test_http_attr_priority_dur_timestamp(self, user_container: str, object_size: ObjectSize, epoch_duration: int): + @requires_container(REP_2_1_2_PUBLIC) + def test_http_attr_priority_dur_timestamp( + self, default_wallet: WalletInfo, container: str, object_size: ObjectSize, epoch_duration: int + ): self.tick_epoch() epoch_count = 2 expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count @@ -238,7 +231,7 @@ class Test_http_system_header(ClusterTestBase): with reporter.step( f"Put objects using HTTP with 
attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr" ): - oid, head_info = self.oid_header_info_for_object(file_path=file_path, attributes=attributes, user_container=user_container) + oid, head_info = self.oid_header_info_for_object(default_wallet, file_path, attributes, container) self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch) with reporter.step("Check that object becomes unavailable when epoch is expired"): for _ in range(0, epoch_count + 1): @@ -250,17 +243,20 @@ class Test_http_system_header(ClusterTestBase): with reporter.step("Check object deleted because it expires-on epoch"): wait_for_epochs_align(self.shell, self.cluster) try_to_get_object_and_expect_error( - cid=user_container, + cid=container, oid=oid, node=self.cluster.cluster_nodes[0], error_pattern="404 Not Found", ) # check that object is not available via grpc with pytest.raises(Exception, match=OBJECT_NOT_FOUND): - get_object_from_random_node(self.wallet, user_container, oid, self.shell, self.cluster) + get_object_from_random_node(default_wallet, container, oid, self.shell, self.cluster) @allure.title("Priority of attributes timestamp>Expiration-RFC (obj_size={object_size})") - def test_http_attr_priority_timestamp_rfc(self, user_container: str, object_size: ObjectSize, epoch_duration: int): + @requires_container(REP_2_1_2_PUBLIC) + def test_http_attr_priority_timestamp_rfc( + self, default_wallet: WalletInfo, container: str, object_size: ObjectSize, epoch_duration: int + ): self.tick_epoch() epoch_count = 2 expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count @@ -275,7 +271,7 @@ class Test_http_system_header(ClusterTestBase): with reporter.step( f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr" ): - oid, head_info = self.oid_header_info_for_object(file_path=file_path, attributes=attributes, user_container=user_container) + oid, head_info = self.oid_header_info_for_object(default_wallet, file_path, attributes, container) self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch) with reporter.step("Check that object becomes unavailable when epoch is expired"): for _ in range(0, epoch_count + 1): @@ -287,14 +283,14 @@ class Test_http_system_header(ClusterTestBase): with reporter.step("Check object deleted because it expires-on epoch"): wait_for_epochs_align(self.shell, self.cluster) try_to_get_object_and_expect_error( - cid=user_container, + cid=container, oid=oid, node=self.cluster.cluster_nodes[0], error_pattern="404 Not Found", ) # check that object is not available via grpc with pytest.raises(Exception, match=OBJECT_NOT_FOUND): - get_object_from_random_node(self.wallet, user_container, oid, self.shell, self.cluster) + get_object_from_random_node(default_wallet, container, oid, self.shell, self.cluster) @allure.title("Object should be deleted when expiration passed (obj_size={object_size})") @pytest.mark.parametrize( @@ -303,7 +299,10 @@ class Test_http_system_header(ClusterTestBase): ["simple"], indirect=True, ) - def test_http_rfc_object_unavailable_after_expir(self, user_container: str, object_size: ObjectSize, epoch_duration: int): + @requires_container(REP_2_1_2_PUBLIC) + def test_http_rfc_object_unavailable_after_expir( + self, default_wallet: WalletInfo, container: str, object_size: ObjectSize, epoch_duration: int + ): self.tick_epoch() epoch_count = 2 expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count 
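Reviewer note on the expiration-priority hunks above: each test derives every System-Expiration-* value from the same target epoch, so the assertions only hold if the gate honors the priority order the titles describe (Epoch over Duration over Timestamp over RFC3339). The sketch below is not part of this patch; it only illustrates, assuming epoch_duration is the network epoch length in seconds and that the gate accepts a Go-style duration string, how equivalent header values for one expiration epoch could be produced. The helper name build_expiration_headers is hypothetical.

    # Illustrative sketch only, not part of the patch. Assumes `epoch_duration` is the
    # network epoch length in seconds; `build_expiration_headers` is a hypothetical name.
    import calendar
    from datetime import datetime, timedelta, timezone


    def build_expiration_headers(current_epoch: int, epoch_count: int, epoch_duration: int) -> dict[str, str]:
        """Build System-Expiration-* headers that all point at the same future epoch."""
        lifetime_seconds = epoch_count * epoch_duration
        expires_at = datetime.now(timezone.utc) + timedelta(seconds=lifetime_seconds)
        return {
            "System-Expiration-Epoch": str(current_epoch + epoch_count),
            "System-Expiration-Duration": f"{lifetime_seconds}s",
            "System-Expiration-Timestamp": str(calendar.timegm(expires_at.timetuple())),
            "System-Expiration-RFC3339": expires_at.strftime("%Y-%m-%dT%H:%M:%SZ"),
        }

In the tests above only a subset of these headers is sent at a time; the highest-priority one present is expected to determine the expiration epoch reported by HEAD, after which the object should become unavailable over both HTTP and gRPC.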
@@ -315,11 +314,7 @@ class Test_http_system_header(ClusterTestBase): with reporter.step( f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr" ): - oid, head_info = self.oid_header_info_for_object( - file_path=file_path, - attributes=attributes, - user_container=user_container, - ) + oid, head_info = self.oid_header_info_for_object(default_wallet, file_path, attributes, container) self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch) with reporter.step("Check that object becomes unavailable when epoch is expired"): for _ in range(0, epoch_count + 1): @@ -332,11 +327,11 @@ class Test_http_system_header(ClusterTestBase): with reporter.step("Check object deleted because it expires-on epoch"): wait_for_epochs_align(self.shell, self.cluster) try_to_get_object_and_expect_error( - cid=user_container, + cid=container, oid=oid, node=self.cluster.cluster_nodes[0], error_pattern="404 Not Found", ) # check that object is not available via grpc with pytest.raises(Exception, match=OBJECT_NOT_FOUND): - get_object_from_random_node(self.wallet, user_container, oid, self.shell, self.cluster) + get_object_from_random_node(default_wallet, container, oid, self.shell, self.cluster) diff --git a/pytest_tests/testsuites/shard/__init__.py b/pytest_tests/testsuites/shard/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/pytest_tests/testsuites/shard/test_control_shard.py b/pytest_tests/testsuites/shard/test_control_shard.py index c445279..4048a8f 100644 --- a/pytest_tests/testsuites/shard/test_control_shard.py +++ b/pytest_tests/testsuites/shard/test_control_shard.py @@ -1,13 +1,13 @@ import json +import os import allure import pytest from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.resources.wellknown_acl import EACL_PUBLIC_READ_WRITE -from frostfs_testlib.steps.cli.container import create_container, delete_container -from frostfs_testlib.steps.cli.object import delete_object, get_object, get_object_nodes, put_object +from frostfs_testlib.shell.interfaces import Shell +from frostfs_testlib.steps.cli.object import get_object, get_object_nodes, put_object from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode from frostfs_testlib.storage.controllers import ClusterStateController, ShardsWatcher from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager @@ -17,65 +17,86 @@ from frostfs_testlib.testing import parallel, wait_for_success from frostfs_testlib.testing.cluster_test_base import ClusterTestBase from frostfs_testlib.utils.file_utils import generate_file +from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container + + +def set_shard_rw_mode(node: ClusterNode): + watcher = ShardsWatcher(node) + shards = watcher.get_shards() + for shard in shards: + watcher.set_shard_mode(shard["shard_id"], mode="read-write") + watcher.await_for_all_shards_status(status="read-write") + + +@pytest.fixture() +@allure.title("Revert all shards mode") +def revert_all_shards_mode(cluster: Cluster) -> None: + yield + parallel(set_shard_rw_mode, cluster.cluster_nodes) + + +@pytest.fixture() +def object_id(client_shell: Shell, cluster: Cluster, container: str, default_wallet: WalletInfo, max_object_size: int) -> str: + with reporter.step("Create container, and put object"): + file = generate_file(round(max_object_size * 
0.8)) + oid = put_object(default_wallet, file, container, client_shell, cluster.default_rpc_endpoint) + return oid + + +@pytest.fixture() +def node_with_object(cluster: Cluster, container: str, object_id: str) -> ClusterNode: + with reporter.step("Search node with object"): + nodes = get_object_nodes(cluster, container, object_id, cluster.cluster_nodes[0]) + + return nodes[0] + + +@pytest.fixture() +@wait_for_success(180, 30, title="Search object in system") +def object_path_on_node(object_id: str, container: str, node_with_object: ClusterNode) -> str: + oid_path = f"{object_id[0]}/{object_id[1]}/{object_id[2]}/{object_id[3]}" + object_path = None + + with reporter.step("Search object file"): + node_shell = node_with_object.storage_node.host.get_shell() + data_path = node_with_object.storage_node.get_data_directory() + all_datas = node_shell.exec(f"ls -la {data_path}/data | awk '{{ print $9 }}'").stdout.strip() + for data_dir in all_datas.replace(".", "").strip().split("\n"): + check_dir = node_shell.exec(f" [ -d {data_path}/data/{data_dir}/data/{oid_path} ] && echo 1 || echo 0").stdout + if "1" in check_dir: + object_path = f"{data_path}/data/{data_dir}/data/{oid_path}" + object_name = f"{object_id[4:]}.{container}" + break + + assert object_path is not None, f"{object_id} object not found in directory - {data_path}/data" + return os.path.join(object_path, object_name) + + +@pytest.fixture() +def erroneous_object_id(object_id: str, object_path_on_node: str, node_with_object: ClusterNode): + with reporter.step("Block read file"): + node_with_object.host.get_shell().exec(f"chmod a-r {object_path_on_node}") + + yield object_id + + with reporter.step("Restore file access"): + node_with_object.host.get_shell().exec(f"chmod +r {object_path_on_node}") + + +@pytest.fixture() +def change_config_storage(cluster_state_controller: ClusterStateController): + with reporter.step("Change threshold error shards"): + cluster_state_controller.manager(ConfigStateManager).set_on_all_nodes( + service_type=StorageNode, values={"storage:shard_ro_error_threshold": "5"} + ) + yield + with reporter.step("Restore threshold error shards"): + cluster_state_controller.manager(ConfigStateManager).revert_all() + @pytest.mark.nightly @pytest.mark.shard class TestControlShard(ClusterTestBase): - @staticmethod - @wait_for_success(180, 30) - def get_object_path_and_name_file(oid: str, cid: str, node: ClusterNode) -> tuple[str, str]: - oid_path = f"{oid[0]}/{oid[1]}/{oid[2]}/{oid[3]}" - object_path = None - - with reporter.step("Search object file"): - node_shell = node.storage_node.host.get_shell() - data_path = node.storage_node.get_data_directory() - all_datas = node_shell.exec(f"ls -la {data_path}/data | awk '{{ print $9 }}'").stdout.strip() - for data_dir in all_datas.replace(".", "").strip().split("\n"): - check_dir = node_shell.exec(f" [ -d {data_path}/data/{data_dir}/data/{oid_path} ] && echo 1 || echo 0").stdout - if "1" in check_dir: - object_path = f"{data_path}/data/{data_dir}/data/{oid_path}" - object_name = f"{oid[4:]}.{cid}" - break - - assert object_path is not None, f"{oid} object not found in directory - {data_path}/data" - return object_path, object_name - - def set_shard_rw_mode(self, node: ClusterNode): - watcher = ShardsWatcher(node) - shards = watcher.get_shards() - for shard in shards: - watcher.set_shard_mode(shard["shard_id"], mode="read-write") - watcher.await_for_all_shards_status(status="read-write") - - @pytest.fixture() - @allure.title("Revert all shards mode") - def revert_all_shards_mode(self) 
-> None: - yield - parallel(self.set_shard_rw_mode, self.cluster.cluster_nodes) - - @pytest.fixture() - def oid_cid_node(self, default_wallet: WalletInfo, max_object_size: int) -> tuple[str, str, ClusterNode]: - with reporter.step("Create container, and put object"): - cid = create_container( - wallet=default_wallet, - shell=self.shell, - endpoint=self.cluster.default_rpc_endpoint, - rule="REP 1 CBF 1", - basic_acl=EACL_PUBLIC_READ_WRITE, - ) - file = generate_file(round(max_object_size * 0.8)) - oid = put_object(wallet=default_wallet, path=file, cid=cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint) - with reporter.step("Search node with object"): - nodes = get_object_nodes(cluster=self.cluster, cid=cid, oid=oid, alive_node=self.cluster.cluster_nodes[0]) - - yield oid, cid, nodes[0] - - object_path, object_name = self.get_object_path_and_name_file(oid, cid, nodes[0]) - nodes[0].host.get_shell().exec(f"chmod +r {object_path}/{object_name}") - delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint) - delete_container(wallet=default_wallet, cid=cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint) - @staticmethod def get_shards_from_cli(node: StorageNode) -> list[Shard]: wallet_path = node.get_remote_wallet_path() @@ -94,16 +115,6 @@ class TestControlShard(ClusterTestBase): ) return [Shard.from_object(shard) for shard in json.loads(result.stdout.split(">", 1)[1])] - @pytest.fixture() - def change_config_storage(self, cluster_state_controller: ClusterStateController): - with reporter.step("Change threshold error shards"): - cluster_state_controller.manager(ConfigStateManager).set_on_all_nodes( - service_type=StorageNode, values={"storage:shard_ro_error_threshold": "5"} - ) - yield - with reporter.step("Restore threshold error shards"): - cluster_state_controller.manager(ConfigStateManager).revert_all() - @allure.title("All shards are available") def test_control_shard(self, cluster: Cluster): for storage_node in cluster.storage_nodes: @@ -114,31 +125,25 @@ class TestControlShard(ClusterTestBase): @allure.title("Shard become read-only when errors exceeds threshold") @pytest.mark.failover + @requires_container(PUBLIC_WITH_POLICY("REP 1 CBF 1", short_name="REP 1")) def test_shard_errors( self, default_wallet: WalletInfo, - oid_cid_node: tuple[str, str, ClusterNode], + container: str, + node_with_object: ClusterNode, + erroneous_object_id: str, + object_path_on_node: str, change_config_storage: None, revert_all_shards_mode: None, ): - oid, cid, node = oid_cid_node - with reporter.step("Search object in system."): - object_path, object_name = self.get_object_path_and_name_file(*oid_cid_node) - with reporter.step("Block read file"): - node.host.get_shell().exec(f"chmod a-r {object_path}/{object_name}") with reporter.step("Get object, expect 6 errors"): for _ in range(6): with pytest.raises(RuntimeError): - get_object( - wallet=default_wallet, - cid=cid, - oid=oid, - shell=self.shell, - endpoint=node.storage_node.get_rpc_endpoint(), - ) + get_object(default_wallet, container, erroneous_object_id, self.shell, node_with_object.storage_node.get_rpc_endpoint()) + with reporter.step("Check shard status"): - for shard in ShardsWatcher(node).get_shards(): - if shard["blobstor"][1]["path"] in object_path: - with reporter.step(f"Shard - {shard['shard_id']} to {node.host_ip}, mode - {shard['mode']}"): + for shard in ShardsWatcher(node_with_object).get_shards(): + if shard["blobstor"][1]["path"] in object_path_on_node: + 
with reporter.step(f"Shard {shard['shard_id']} should be in read-only mode"): assert shard["mode"] == "read-only" break
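A closing note on the new shard fixtures: object_path_on_node walks each blobstor data directory on the node and probes for a nested path built from the first four characters of the object ID, with the file itself named from the remaining OID characters plus the container ID. The snippet below is only an illustration of that path arithmetic as the fixture performs it; it is not part of the patch, and the actual on-disk layout depends on the storage node's blobstor configuration.

    # Illustration of the layout probed by object_path_on_node; not part of the patch.
    # The directory name passed as `data_dir` is discovered by the fixture itself
    # by listing <data_path>/data on the node, so any value used here is hypothetical.
    import os


    def candidate_object_path(data_path: str, data_dir: str, oid: str, cid: str) -> str:
        """Compose <data_path>/data/<data_dir>/data/<o>/<i>/<d>/<4th>/<oid[4:]>.<cid>."""
        oid_prefix = os.path.join(oid[0], oid[1], oid[2], oid[3])
        return os.path.join(data_path, "data", data_dir, "data", oid_prefix, f"{oid[4:]}.{cid}")

Exposing the resolved path as its own fixture lets erroneous_object_id revoke read access on exactly that file and lets test_shard_errors match it against the shard's blobstor path, without repeating the directory search in the test body.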