Compare commits

...

10 commits

SHA1        Message                                               Date
7c788057db  Fix empty map tests                                   2023-09-27 18:28:56 +03:00
            Signed-off-by: anikeev-yadro <a.anikeev@yadro.com>
a0ea180aa9  add policy                                            2023-09-25 13:59:17 +00:00
73a9c95704  Attach ACL wallets to allure                          2023-09-21 19:49:31 +03:00
            Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
d38e05c100  Fix teardown for network test                         2023-09-19 14:35:21 +00:00
            Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
2a1d40680a  Add new fixture                                       2023-09-19 08:35:27 +00:00
            Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
ed15485b72  Add new fixture                                       2023-09-19 08:35:27 +00:00
            Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
3021805f7e  Fix tests title                                       2023-09-11 16:59:06 +03:00
            Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
1cd077fdf3  Update test titles to conform standard                2023-09-08 10:42:47 +00:00
4d2e27a317  Fix policy                                            2023-09-07 13:40:42 +03:00
            Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
967f4f37d9  Change file_pith fixture and change title test acl    2023-09-07 08:59:03 +00:00
            Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
34 changed files with 809 additions and 531 deletions

View file

@@ -54,7 +54,7 @@ def wallets(default_wallet: str, temp_directory: str, cluster: Cluster) -> Walle
     storage_wallet_path = storage_node.get_wallet_path()
     storage_wallet_config = storage_node.get_wallet_config_path()
 
-    yield Wallets(
+    wallets_collection = Wallets(
         wallets={
             EACLRole.USER: [Wallet(wallet_path=default_wallet, config_path=DEFAULT_WALLET_CONFIG)],
             EACLRole.OTHERS: [
@@ -68,10 +68,22 @@ def wallets(default_wallet: str, temp_directory: str, cluster: Cluster) -> Walle
         }
     )
 
+    for role, wallets in wallets_collection.wallets.items():
+        if role == EACLRole.SYSTEM:
+            continue
+        for wallet in wallets:
+            allure.attach.file(
+                wallet.wallet_path,
+                os.path.basename(wallet.wallet_path),
+                allure.attachment_type.JSON,
+            )
+
+    return wallets_collection
+
 
-@pytest.fixture(scope="module")
-def file_path(simple_object_size: ObjectSize) -> str:
-    yield generate_file(simple_object_size.value)
+@pytest.fixture()
+def file_path(object_size: ObjectSize) -> str:
+    yield generate_file(object_size.value)
 
 
 @pytest.fixture(scope="function")

View file

@@ -68,7 +68,7 @@ class TestACLBasic(ClusterTestBase):
         # with allure.step('Delete public readonly container'):
         #     delete_container(user_wallet.wallet_path, cid_read_only)
 
-    @allure.title("Test basic ACL on public container")
+    @allure.title("Operations with basic ACL on public container (obj_size={object_size})")
     def test_basic_acl_public(self, wallets: Wallets, public_container: str, file_path: str):
         """
         Test basic ACL set during public container creation.
@@ -114,7 +114,7 @@ class TestACLBasic(ClusterTestBase):
                 cluster=self.cluster,
             )
 
-    @allure.title("Test basic ACL on private container")
+    @allure.title("Operations with basic ACL on PRIVATE container (obj_size={object_size})")
     def test_basic_acl_private(self, wallets: Wallets, private_container: str, file_path: str):
         """
         Test basic ACL set during private container creation.
@@ -148,7 +148,7 @@ class TestACLBasic(ClusterTestBase):
                 cluster=self.cluster,
             )
 
-    @allure.title("Test basic ACL on readonly container")
+    @allure.title("Operations with basic ACL on READONLY container (obj_size={object_size})")
     def test_basic_acl_readonly(
         self, wallets: Wallets, client_shell: Shell, read_only_container: str, file_path: str
     ):

View file

@@ -21,7 +21,7 @@ from pytest_tests.testsuites.acl.conftest import Wallets
 @pytest.mark.acl
 @pytest.mark.acl_bearer
 class TestACLBearer(ClusterTestBase):
-    @allure.title("Validate FrostFS operations with {role.value} BearerToken")
+    @allure.title("Operations with BearerToken (role={role.value}, obj_size={object_size})")
     @pytest.mark.parametrize("role", [EACLRole.USER, EACLRole.OTHERS])
     def test_bearer_token_operations(
         self,
@@ -113,7 +113,7 @@ class TestACLBearer(ClusterTestBase):
                 cluster=self.cluster,
             )
 
-    @allure.title("BearerToken Operations for compound Operations")
+    @allure.title("BearerToken for compound operations (obj_size={object_size})")
     def test_bearer_token_compound_operations(self, wallets, eacl_container_with_objects):
         endpoint = self.cluster.default_rpc_endpoint
         cid, objects_oids, file_path = eacl_container_with_objects

View file

@@ -58,7 +58,7 @@ class TestEACLContainer(ClusterTestBase):
         yield cid, oid, file_path
 
-    @allure.title("Deny FrostFS operations for {deny_role.value}")
+    @allure.title("Deny operations (role={deny_role.value}, obj_size={object_size})")
     @pytest.mark.parametrize("deny_role", [EACLRole.USER, EACLRole.OTHERS])
     def test_extended_acl_deny_all_operations(
         self,
@@ -145,7 +145,7 @@ class TestEACLContainer(ClusterTestBase):
                 cluster=self.cluster,
             )
 
-    @allure.title("Allow FrostFS operations for only one other pubkey")
+    @allure.title("Operations for only one other pubkey (obj_size={object_size})")
     def test_extended_acl_deny_all_operations_exclude_pubkey(
         self, wallets: Wallets, eacl_container_with_objects: tuple[str, list[str], str]
     ):
@@ -206,7 +206,7 @@ class TestEACLContainer(ClusterTestBase):
                 cluster=self.cluster,
             )
 
-    @allure.title("Replication with eACL deny rules")
+    @allure.title("Replication with eACL deny rules (obj_size={object_size})")
     def test_extended_acl_deny_replication(
         self,
         wallets: Wallets,
@@ -248,7 +248,7 @@ class TestEACLContainer(ClusterTestBase):
             storage_nodes,
         )
 
-    @allure.title("System operations with extended ACL")
+    @allure.title("Operations with extended ACL for SYSTEM (obj_size={object_size})")
     def test_extended_actions_system(
         self, wallets: Wallets, eacl_container_with_objects: tuple[str, list[str], str]
     ):

View file

@@ -128,7 +128,9 @@ class TestEACLFilters(ClusterTestBase):
                 endpoint=self.cluster.default_rpc_endpoint,
             )
 
-    @allure.title("Validate FrostFS operations with request filter: {match_type}")
+    @allure.title(
+        "Operations with request filter (match_type={match_type}, obj_size={object_size})"
+    )
     @pytest.mark.parametrize(
         "match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL]
     )
@@ -245,7 +247,9 @@ class TestEACLFilters(ClusterTestBase):
                 bearer=bearer_other,
             )
 
-    @allure.title("Validate FrostFS operations with deny user headers filter: {match_type}")
+    @allure.title(
+        "Operations with deny user headers filter (match_type={match_type}, obj_size={object_size})"
+    )
     @pytest.mark.parametrize(
         "match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL]
     )
@@ -428,7 +432,9 @@ class TestEACLFilters(ClusterTestBase):
                 bearer=bearer_other_for_put,
             )
 
-    @allure.title("Validate FrostFS operation with allow eACL user headers filters: {match_type}")
+    @allure.title(
+        "Operations with allow eACL user headers filters (match_type={match_type}, obj_size={object_size})"
+    )
     @pytest.mark.parametrize(
         "match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL]
     )

View file

@@ -115,17 +115,29 @@ def max_object_size(cluster: Cluster, client_shell: Shell) -> int:
 @pytest.fixture(scope="session")
-def simple_object_size(max_object_size: int) -> int:
+def simple_object_size(max_object_size: int) -> ObjectSize:
     size = min(int(SIMPLE_OBJECT_SIZE), max_object_size)
     return ObjectSize("simple", size)
 
 
 @pytest.fixture(scope="session")
-def complex_object_size(max_object_size: int) -> int:
+def complex_object_size(max_object_size: int) -> ObjectSize:
     size = max_object_size * int(COMPLEX_OBJECT_CHUNKS_COUNT) + int(COMPLEX_OBJECT_TAIL_SIZE)
     return ObjectSize("complex", size)
 
 
+# By default we want all tests to be executed with both object sizes.
+# This can be overridden in chosen tests if needed.
+@pytest.fixture(scope="session", params=["simple", "complex"])
+def object_size(
+    simple_object_size: ObjectSize, complex_object_size: ObjectSize, request: pytest.FixtureRequest
+) -> ObjectSize:
+    if request.param == "simple":
+        return simple_object_size
+    return complex_object_size
+
+
 @pytest.fixture(scope="session")
 def wallet_factory(temp_directory: str, client_shell: Shell, cluster: Cluster) -> WalletFactory:
     return WalletFactory(temp_directory, client_shell, cluster)
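The comment above documents the intent: every test that requests object_size now runs once per size. A minimal, hypothetical sketch (not part of this change) of how a single test module could narrow that back to one size by shadowing the fixture locally, reusing the fixture names from the diff above:

# Hypothetical module-local override: redefining object_size in a test module
# shadows the session-scoped, parametrized fixture from conftest.py for that
# module only, so its tests run with the simple object size alone.
import pytest
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize


@pytest.fixture(scope="module")
def object_size(simple_object_size: ObjectSize) -> ObjectSize:
    # Pin this module to simple objects; the simple/complex matrix is skipped here.
    return simple_object_size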
@@ -166,6 +178,7 @@ def s3_client(
     client_shell: Shell,
     s3_policy: Optional[str],
     cluster: Cluster,
+    auth_container_placement_policy: str,
     request: pytest.FixtureRequest,
 ) -> S3ClientWrapper:
     wallet = WalletInfo(path=default_wallet, password=DEFAULT_WALLET_PASS)
@@ -176,6 +189,7 @@ def s3_client(
         cluster,
         s3gates=[cluster_node.s3_gate for cluster_node in cluster.cluster_nodes],
         policy=s3_policy,
+        container_placement_policy=auth_container_placement_policy,
     )
     containers_list = list_containers(
         wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint
@@ -273,3 +287,18 @@ def default_wallet(wallet_factory: WalletFactory) -> str:
     wallet = wallet_factory.create_wallet(password=DEFAULT_WALLET_PASS)
     allure.attach.file(wallet.path, os.path.basename(wallet.path), allure.attachment_type.JSON)
     return wallet.path
+
+
+@allure.step("[Class]: Container placement policy for keys")
+@pytest.fixture(scope="class")
+def auth_container_placement_policy(cluster: Cluster, request: pytest.FixtureRequest):
+    placeholders = {
+        "$ALPHABET_NODE_COUNT$": 4 if len(cluster.cluster_nodes) < 8 else 8,
+        "$NODE_COUNT$": len(cluster.cluster_nodes),
+    }
+    placement_policy = None
+    if "param" in request.__dict__:
+        placement_policy = request.param
+        for key, value in placeholders.items():
+            placement_policy = placement_policy.replace(key, str(value))
+
+    return placement_policy
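For reference, a small illustrative snippet (not part of the diff) of how the placeholder substitution above resolves the policy template that the failover suite passes in via indirect parametrization, assuming a 4-node cluster:

# Illustration only: substitute the fixture's placeholders into the policy
# template used by the failover suite further down (4-node cluster assumed).
placeholders = {"$ALPHABET_NODE_COUNT$": 4, "$NODE_COUNT$": 4}
policy = "REP $ALPHABET_NODE_COUNT$ SELECT 4 FROM ALPHA FILTER 'role' EQ 'alphabet' AS ALPHA"
for key, value in placeholders.items():
    policy = policy.replace(key, str(value))

print(policy)  # REP 4 SELECT 4 FROM ALPHA FILTER 'role' EQ 'alphabet' AS ALPHA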

View file

@@ -22,8 +22,8 @@ class TestContainer(ClusterTestBase):
     @pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"])
     @pytest.mark.smoke
     def test_container_creation(self, default_wallet: str, name: str):
-        scenario_title = f"with name {name}" if name else "without name"
-        allure.dynamic.title(f"User can create container {scenario_title}")
+        scenario_title = "with name" if name else "without name"
+        allure.dynamic.title(f"Create container {scenario_title}")
         wallet = default_wallet
 
         with open(wallet) as file:

View file

@@ -0,0 +1,477 @@
import allure
import pytest
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import create_container, delete_container, get_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.steps.node_management import check_node_in_map
from frostfs_testlib.steps.storage_policy import get_nodes_with_object, get_simple_object_copies
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file

from pytest_tests.helpers.utility import placement_policy_from_container
@pytest.mark.container
@pytest.mark.policy
class TestPolicy(ClusterTestBase):
@pytest.mark.skip(reason="error with fixture")
@allure.title("[NEGATIVE] Placement policy")
@pytest.mark.policy
def test_placement_policy_negative(
self, default_wallet, placement_rule
):
"""
Negative test for placement policy.
"""
wallet = default_wallet
endpoint = self.cluster.default_rpc_endpoint
try:
cid = create_container(
wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
)
except:
got_policy = placement_policy_from_container(
get_container(wallet, cid, json_mode=False, shell=self.shell, endpoint=endpoint)
)
assert got_policy == placement_rule.replace(
"'", ""
), f"Can't parse placement policy"
@pytest.mark.skip(reason="error with fixture")
@allure.title("110569 [NEGATIVE] Placement policy: Not enough nodes to SELECT")
@pytest.mark.policy
def test_placement_policy_negative_not_enough_nodes_to_select(
self, default_wallet, placement_rule
):
"""
Negative test for placement policy: Not enough nodes to SELECT.
"""
wallet = default_wallet
endpoint = self.cluster.default_rpc_endpoint
with pytest.raises(RuntimeError, match=".*not enough nodes to SELECT from.*"):
cid = create_container(
wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
)
@pytest.mark.skip(reason="error with fixture")
@allure.title("110570 [NEGATIVE] Placement policy: Filter not found")
@pytest.mark.policy
def test_placement_policy_negative_not_enough_nodes_to_filter(
self, default_wallet, placement_rule
):
"""
Negative test for placement policy: Filter not found.
"""
wallet = default_wallet
endpoint = self.cluster.default_rpc_endpoint
with pytest.raises(RuntimeError, match=".*not enough nodes to FILTER from.*"):
cid = create_container(
wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
)
@pytest.mark.skip(reason="error with fixture")
@allure.title("110572 [NEGATIVE] Placement policy: SELECTOR not found")
@pytest.mark.policy
def test_placement_policy_negative_not_enough_nodes_to_selector(
self, default_wallet, placement_rule
):
"""
Negative test for placement policy: SELECTOR not found.
"""
wallet = default_wallet
endpoint = self.cluster.default_rpc_endpoint
with pytest.raises(RuntimeError, match=".*not enough nodes to SELECTOR from.*"):
cid = create_container(
wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
)
@pytest.mark.parametrize(
"placement_rule,expected_copies,expected_nodes_id",
[
("REP 1 REP 1 CBF 1", 2, {2, 2}),
]
)
@pytest.mark.policy
@allure.title("110571 Object should have {expected_copies} copies with policy {placement_rule}")
def test_simple_policy_results_with_one_node(
self,
default_wallet,
placement_rule,
expected_copies,
expected_nodes_id: set[int],
simple_object_size: ObjectSize,
):
"""
This test checks object's copies based on container's placement simple policy results with one node.
"""
wallet = default_wallet
file_path = generate_file(simple_object_size.value)
cid, oid = self.validate_object_copies(
wallet, placement_rule, file_path
)
self.check_expected_copies(cid, oid, expected_copies, expected_nodes_id)
@pytest.mark.parametrize(
"placement_rule,expected_copies,expected_nodes_id",
[
("UNIQUE REP 1 IN AnyNode REP 1 IN AnyNode CBF 1 SELECT 1 FROM * AS AnyNode", 2, {2, 3}),
]
)
@pytest.mark.policy
@allure.title("110544 Object should have {expected_copies} copies with policy {placement_rule}")
def test_policy_with_select_results_with_unique_nodes(
self,
default_wallet,
placement_rule,
expected_copies,
expected_nodes_id: set[int],
simple_object_size: ObjectSize,
):
"""
This test checks object's copies based on container's placement policy with SELECT results with UNIQUE nodes.
"""
wallet = default_wallet
file_path = generate_file(simple_object_size.value)
cid, oid = self.validate_object_copies(
wallet, placement_rule, file_path
)
self.check_expected_copies(cid, oid, expected_copies, expected_nodes_id)
@pytest.mark.parametrize(
"placement_rule,expected_copies,expected_nodes_id",
[
('UNIQUE REP 1 IN RUS REP 1 IN RUS CBF 1 SELECT 1 FROM RU AS RUS FILTER Country NE Sweden AS NotSE FILTER @NotSE AND NOT (CountryCode EQ FI) AND Country EQ "Russia" AS RU', 2, {3, 1}),
]
)
@pytest.mark.policy
@allure.title("110545 Object should have {expected_copies} copies with policy {placement_rule}")
def test_policy_with_select_and_complex_filter_results_with_unique_nodes(
self,
default_wallet,
placement_rule,
expected_copies,
expected_nodes_id: set[int],
simple_object_size: ObjectSize,
):
"""
This test checks object's copies based on container's placement policy with SELECT and Complex FILTER results with UNIQUE nodes.
"""
wallet = default_wallet
file_path = generate_file(simple_object_size.value)
cid, oid = self.validate_object_copies(
wallet, placement_rule, file_path
)
self.check_expected_copies(cid, oid, expected_copies, expected_nodes_id)
@pytest.mark.parametrize(
"placement_rule,expected_copies,expected_nodes_id",
[
("""REP 4""",
4, {3, 2, 1, 4}),
]
)
@pytest.mark.policy
@allure.title("110610 Object should have {expected_copies} copies with policy {placement_rule}")
def test_simple_policy_results_with_100_of_available_nodes(
self,
default_wallet,
placement_rule,
expected_copies,
expected_nodes_id: set[int],
simple_object_size: ObjectSize,
):
"""
This test checks object's copies based on container's placement simple policy results with 100% of available nodes.
"""
wallet = default_wallet
file_path = generate_file(simple_object_size.value)
cid, oid = self.validate_object_copies(
wallet, placement_rule, file_path
)
self.check_expected_copies(cid, oid, expected_copies, expected_nodes_id)
@pytest.mark.parametrize(
"placement_rule,expected_copies,expected_nodes_id",
[
("UNIQUE REP 1 REP 1 CBF 1", 2, {2, 3}),
]
)
@pytest.mark.policy
@allure.title("110537 Object should have {expected_copies} copies with policy {placement_rule}")
def test_simple_policy_results_with_unique_nodes(
self,
default_wallet,
placement_rule,
expected_copies,
expected_nodes_id: set[int],
simple_object_size: ObjectSize,
):
"""
This test checks object's copies based on container's placement simple policy results with UNIQUE nodes.
"""
wallet = default_wallet
file_path = generate_file(simple_object_size.value)
cid, oid = self.validate_object_copies(
wallet, placement_rule, file_path
)
self.check_expected_copies(cid, oid, expected_copies, expected_nodes_id)
@pytest.mark.parametrize(
"placement_rule,expected_copies,expected_nodes_id",
[
("UNIQUE REP 1 REP 1 CBF 1", 2, {2, 3}),
]
)
@pytest.mark.policy
@allure.title("110587 Object should have {expected_copies} copies with policy {placement_rule}")
def test_policy_with_multi_selects_and_filters_results_with_one_node(
self,
default_wallet,
placement_rule,
expected_copies,
expected_nodes_id: set[int],
simple_object_size: ObjectSize,
):
"""
This test checks object's copies based on container's placement policy with Multi SELECTs and FILTERs results with one node.
"""
wallet = default_wallet
file_path = generate_file(simple_object_size.value)
cid, oid = self.validate_object_copies(
wallet, placement_rule, file_path
)
self.check_expected_copies(cid, oid, expected_copies, expected_nodes_id)
@pytest.mark.parametrize(
"placement_rule,expected_copies,expected_nodes_id",
[
("REP 1 CBF 1", 1, {2}),
]
)
@pytest.mark.policy
@allure.title("110593 Object should have {expected_copies} copies with policy {placement_rule}")
def test_simple_policy_results_with_25_of_available_nodes(
self,
default_wallet,
placement_rule,
expected_copies,
expected_nodes_id: set[int],
simple_object_size: ObjectSize,
):
"""
This test checks object's copies based on container's placement policy results with 25% of available nodes.
"""
wallet = default_wallet
file_path = generate_file(simple_object_size.value)
cid, oid = self.validate_object_copies(
wallet, placement_rule, file_path
)
self.check_expected_copies(cid, oid, expected_copies, expected_nodes_id)
@pytest.mark.parametrize(
"placement_rule,expected_copies,expected_nodes_id",
[
("REP 1 IN One CBF 1 SELECT 1 FROM * AS One", 1, {2}),
]
)
@pytest.mark.policy
@allure.title("110594 Object should have {expected_copies} copies with policy {placement_rule}")
def test_policy_with_select_results_with_25_of_available_nodes(
self,
default_wallet,
placement_rule,
expected_copies,
expected_nodes_id: set[int],
simple_object_size: ObjectSize,
):
"""
This test checks object's copies based on container's placement policy with SELECT results with 25% of available nodes.
"""
wallet = default_wallet
file_path = generate_file(simple_object_size.value)
cid, oid = self.validate_object_copies(
wallet, placement_rule, file_path
)
self.check_expected_copies(cid, oid, expected_copies, expected_nodes_id)
@pytest.mark.parametrize(
"placement_rule,expected_copies,expected_nodes_id",
[
("REP 1 IN Nodes25 SELECT 1 FROM LE10 AS Nodes25 FILTER Price LE 10 AS LE10", 1, {2}),
]
)
@pytest.mark.policy
@allure.title("110595 Object should have {expected_copies} copies with policy {placement_rule}")
def test_policy_with_select_and_filter_results_with_25_of_available_nodes(
self,
default_wallet,
placement_rule,
expected_copies,
expected_nodes_id: set[int],
simple_object_size: ObjectSize,
):
"""
This test checks object's copies based on container's placement policy with SELECT and FILTER results with 25% of available nodes.
"""
wallet = default_wallet
file_path = generate_file(simple_object_size.value)
cid, oid = self.validate_object_copies(
wallet, placement_rule, file_path
)
self.check_expected_copies(cid, oid, expected_copies, expected_nodes_id)
@pytest.mark.parametrize(
"placement_rule,expected_copies,expected_nodes_id",
[
("""REP 1 IN Nodes25 SELECT 1 FROM BET0AND10 AS Nodes25 FILTER Price LE 10 AS LE10 FILTER Price GT 0 AS GT0 FILTER @LE10 AND @GT0 AS BET0AND10""",
1, {1}),
]
)
@pytest.mark.policy
@allure.title("110596 Object should have {expected_copies} copies with policy {placement_rule}")
def test_policy_with_select_and_complex_filter_results_with_25_of_available_nodes(
self,
default_wallet,
placement_rule,
expected_copies,
expected_nodes_id: set[int],
complex_object_size: ObjectSize,
):
"""
110596 This test checks object's copies based on container's placement policy with SELECT and Complex FILTER results with 25% of available nodes.
"""
wallet = default_wallet
file_path = generate_file(complex_object_size.value)
cid, oid = self.validate_object_copies(
wallet, placement_rule, file_path
)
self.check_expected_copies(cid, oid, expected_copies, expected_nodes_id)
@pytest.mark.parametrize(
"placement_rule,expected_copies,expected_nodes_id",
[
("""UNIQUE REP 1 IN MyRussianNodes REP 1 IN MyRussianNodes CBF 1 SELECT 1 FROM RussianNodes AS MyRussianNodes FILTER Country EQ Russia AS RussianNodes""",
2, {3, 1}),
]
)
@pytest.mark.policy
@allure.title("110588 Object should have {expected_copies} copies with policy {placement_rule}")
def test_policy_with_select_and_filter_results_with_unique_nodes(
self,
default_wallet,
placement_rule,
expected_copies,
expected_nodes_id: set[int],
simple_object_size: ObjectSize,
):
"""
This test checks object's copies based on container's placement policy with SELECT and FILTER results with UNIQUE nodes.
"""
wallet = default_wallet
file_path = generate_file(simple_object_size.value)
cid, oid = self.validate_object_copies(
wallet, placement_rule, file_path
)
self.check_expected_copies(cid, oid, expected_copies, expected_nodes_id)
@pytest.mark.parametrize(
"placement_rule,expected_copies,expected_nodes_id",
[
("""UNIQUE REP 1 IN MyRussianNodes REP 1 IN MyRussianNodes CBF 1 SELECT 1 FROM RussianNodes AS MyRussianNodes FILTER Country EQ Russia AS RussianNodes""",
2, {3, 1}),
]
)
@pytest.mark.policy
@allure.title("110586 Object should have {expected_copies} copies with policy {placement_rule}")
def test_policy_with_select_and_filter_results_with_unique_nodes(
self,
default_wallet,
placement_rule,
expected_copies,
expected_nodes_id: set[int],
simple_object_size: ObjectSize,
):
"""
This test checks object's copies based on container's placement policy with SELECT and FILTER results with UNIQUE nodes.
"""
wallet = default_wallet
file_path = generate_file(simple_object_size.value)
cid, oid = self.validate_object_copies(
wallet, placement_rule, file_path
)
self.check_expected_copies(cid, oid, expected_copies, expected_nodes_id)
@allure.step("Validate policy")
def validate_object_policy(
self, wallet: str, placement_rule: str, cid: str, endpoint: str
):
got_policy = placement_policy_from_container(
get_container(wallet, cid, json_mode=False, shell=self.shell, endpoint=endpoint)
)
assert got_policy == placement_rule.replace(
"'", ""
), f"Expected placement policy\n{placement_rule}\nbut got\n{got_policy}"
@allure.step("Validate expected copies")
def check_expected_copies(self, cid: str, oid: str, expected_copies: int, expected_copies_id: set):
nodes = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(nodes) == expected_copies, f"Expected {expected_copies} copies, got {len(nodes)}"
nodes_id = {node.id for node in nodes}
assert nodes_id == expected_copies_id, f"Expected nodes {expected_copies_id}, got {nodes_id}"
@allure.step("Validate object copies")
def validate_object_copies(
self, wallet: str, placement_rule: str, file_path: str
) -> tuple[str, str]:
endpoint = self.cluster.default_rpc_endpoint
with allure.step("Create container"):
cid = create_container(
wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
)
self.validate_object_policy(wallet, placement_rule, cid, endpoint)
with allure.step("Put object"):
oid = put_object_to_random_node(
wallet, file_path, cid, shell=self.shell, cluster=self.cluster
)
return cid, oid

View file

@@ -18,7 +18,7 @@ from frostfs_testlib.steps.cli.container import (
 )
 from frostfs_testlib.steps.cli.object import get_object, get_object_nodes, put_object_to_random_node
 from frostfs_testlib.steps.s3.s3_helper import set_bucket_versioning
-from frostfs_testlib.storage.cluster import ClusterNode
+from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.controllers import ClusterStateController
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
@@ -38,7 +38,17 @@ blocked_nodes: list[ClusterNode] = []
 
 def pytest_generate_tests(metafunc):
     if "s3_client" in metafunc.fixturenames:
-        metafunc.parametrize("s3_client", [AwsCliClient], ids=["aws"], indirect=True)
+        metafunc.parametrize(
+            "s3_client, auth_container_placement_policy",
+            [
+                (
+                    AwsCliClient,
+                    "REP $ALPHABET_NODE_COUNT$ SELECT 4 FROM ALPHA FILTER 'role' EQ 'alphabet' AS ALPHA",
+                )
+            ],
+            ids=["aws"],
+            indirect=True,
+        )
 
 
 @pytest.mark.failover
@@ -51,7 +61,7 @@ class TestFailoverNetwork(ClusterTestBase):
         with allure.step(f"Count blocked nodes {len(blocked_nodes)}"):
             not_empty = len(blocked_nodes) != 0
             for node in list(blocked_nodes):
-                with allure.step(f"Restore network at host for {node.label}"):
+                with allure.step(f"Restore network for {node}"):
                     cluster_state_controller.restore_traffic(mode="ports", node=node)
                     blocked_nodes.remove(node)
             if not_empty:
@@ -177,7 +187,9 @@ class TestFailoverSplitBrain(ClusterTestBase):
             splitted.append(nodes_list[i::count] + free_nodes[i::count])
         return tuple(s for s in splitted)
 
-    @allure.title("Replication tree after split brain, versioning bucket")
+    @allure.title(
+        "Replication tree after split brain, versioning bucket (placement_policy={auth_container_placement_policy}, s3_client={s3_client})",
+    )
     def test_versioning_bucket_after_split_brain(
         self,
         cluster_state_controller: ClusterStateController,
@@ -311,7 +323,9 @@ class TestFailoverSplitBrain(ClusterTestBase):
             f"{object_version[-1]} " f"!= {bucket_versions[-1]['VersionId']}"
         )
 
-    @allure.title("Replication tree after split brain, no version bucket")
+    @allure.title(
+        "Replication tree after split brain, no version bucket (placement_policy={auth_container_placement_policy}, s3_client={s3_client})"
+    )
     def test_no_version_bucket_after_split_brain(
         self,
         cluster_state_controller: ClusterStateController,

View file

@@ -48,7 +48,7 @@ class TestFailoverServer(ClusterTestBase):
         default_wallet: str,
     ) -> list[StorageContainer]:
-        placement_rule = "REP 2 CBF 2 SELECT 2 FROM * AS X"
+        placement_rule = "REP 2 CBF 2 SELECT 2 FROM *"
         containers = []

View file

@@ -4,7 +4,6 @@ from time import sleep
 
 import allure
 import pytest
-from frostfs_testlib.analytics import test_case
 from frostfs_testlib.hosting import Host
 from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
 from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
@@ -97,7 +96,7 @@ def return_stopped_hosts(shell: Shell, cluster: Cluster) -> None:
 
 @pytest.mark.failover
 class TestFailoverStorage(ClusterTestBase):
-    @allure.title("Lose and return storage node's host ({stop_mode} stop)")
+    @allure.title("Shutdown and start node (stop_mode={stop_mode})")
     @pytest.mark.parametrize("stop_mode", ["hard", "soft"])
     @pytest.mark.failover_reboot
     def test_lose_storage_node_host(
@@ -135,7 +134,7 @@ class TestFailoverStorage(ClusterTestBase):
             oid,
             2,
             shell=self.shell,
-            nodes=list(set(self.cluster.storage_nodes) - {node}),
+            nodes=list(set(self.cluster.storage_nodes) - {*stopped_nodes}),
         )
         assert all(old_node not in new_nodes for old_node in nodes)
@@ -157,7 +156,7 @@ class TestFailoverStorage(ClusterTestBase):
         )
         assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
 
-    @allure.title("Panic storage node's host (sequenced_reboots={sequence})")
+    @allure.title("Panic reboot nodes (sequenced_reboots={sequence})")
     @pytest.mark.parametrize("sequence", [True, False])
     @pytest.mark.failover_panic
     def test_panic_storage_node_host(
@@ -229,7 +228,7 @@ class TestFailoverStorage(ClusterTestBase):
         )
         assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
 
-    @allure.title("{s3_client}: Do not ignore unhealthy tree endpoints")
+    @allure.title("Do not ignore unhealthy tree endpoints (s3_client={s3_client})")
     def test_unhealthy_tree(
         self,
         s3_client: S3ClientWrapper,
@@ -289,12 +288,8 @@ class TestEmptyMap(ClusterTestBase):
                 include_node_to_network_map(node, node, shell=self.shell, cluster=self.cluster)
                 stopped_nodes.remove(node)
 
-    @test_case.title("Test makes network map empty (offline all storage nodes)")
-    @test_case.priority(test_case.TestCasePriority.HIGH)
-    @test_case.suite_name("failovers")
-    @test_case.suite_section("test_failover_storage")
     @pytest.mark.failover_empty_map_offlne
-    @allure.title("{s3_client}: empty network map (offline all storage nodes)")
+    @allure.title("Empty network map via offline all storage nodes (s3_client={s3_client})")
     def test_offline_all_storage_nodes(
         self,
         s3_client: S3ClientWrapper,
@@ -332,8 +327,8 @@ class TestEmptyMap(ClusterTestBase):
         storage_nodes = self.cluster.storage_nodes
         with allure.step("Exclude all storage nodes from network map"):
             for node in storage_nodes:
-                exclude_node_from_network_map(node, node, shell=self.shell, cluster=self.cluster)
                 stopped_nodes.append(node)
+                exclude_node_from_network_map(node, node, shell=self.shell, cluster=self.cluster)
 
         with allure.step("Return all storage nodes to network map"):
             for node in storage_nodes:
@@ -359,12 +354,8 @@ class TestEmptyMap(ClusterTestBase):
                 check_node_in_map(node, shell=self.shell, alive_node=node)
             stopped_nodes.remove(node)
 
-    @test_case.title("Test makes network map empty (stop storage service on all nodes)")
-    @test_case.priority(test_case.TestCasePriority.HIGH)
-    @test_case.suite_name("failovers")
-    @test_case.suite_section("test_failover_storage")
     @pytest.mark.failover_empty_map_stop_service
-    @allure.title("{s3_client}: empty network map (stop storage service on all nodes)")
+    @allure.title("Empty network map via stop all storage services (s3_client={s3_client})")
     def test_stop_all_storage_nodes(
         self,
         s3_client: S3ClientWrapper,
@@ -405,8 +396,8 @@ class TestEmptyMap(ClusterTestBase):
         with allure.step("Stop all storage nodes"):
             for node in self.cluster.storage_nodes:
                 with allure.step(f"Stop storage service on node: {node}"):
-                    node.stop_service()
                     stopped_nodes.append(node)
+                    node.stop_service()
 
         with allure.step("Remove all nodes from network map"):
             remove_nodes_from_map_morph(
@@ -439,7 +430,7 @@ class TestEmptyMap(ClusterTestBase):
             check_node_in_map(node, shell=self.shell, alive_node=node)
             stopped_nodes.remove(node)
 
-    @allure.title("{s3_client}: Object loss from fstree/blobovnicza, versioning is enabled")
+    @allure.title("Object loss from fstree/blobovnicza (versioning=enabled, s3_client={s3_client})")
     def test_s3_fstree_blobovnicza_loss_versioning_on(
         self,
         s3_client: S3ClientWrapper,
@@ -484,7 +475,9 @@ class TestEmptyMap(ClusterTestBase):
         with allure.step("Delete bucket"):
             s3_client.delete_bucket(bucket)
 
-    @allure.title("{s3_client}: Object loss from fstree/blobovnicza, versioning is disabled")
+    @allure.title(
+        "Object loss from fstree/blobovnicza (versioning=disabled, s3_client={s3_client})"
+    )
     def test_s3_fstree_blobovnicza_loss_versioning_off(
         self,
         s3_client: S3ClientWrapper,
@@ -526,7 +519,7 @@ class TestEmptyMap(ClusterTestBase):
         [VersioningStatus.ENABLED, VersioningStatus.UNDEFINED],
     )
     @allure.title(
-        "{s3_client}: After Pilorama.db loss on all nodes list objects should return nothing in second listing (versioning_status {versioning_status})"
+        "After Pilorama.db loss on all nodes list objects should return nothing in second listing (versioning_status={versioning_status}, s3_client={s3_client})"
     )
     def test_s3_pilorama_loss(
         self,
@@ -584,7 +577,7 @@ class TestStorageDataLoss(ClusterTestBase):
         return piloramas
 
     @allure.title(
-        "{s3_client}: After metabase loss on all nodes operations on objects and buckets should be still available via S3"
+        "After metabase loss on all nodes operations on objects and buckets should be still available via S3 (s3_client={s3_client})"
     )
     @pytest.mark.metabase_loss
     def test_metabase_loss(
@@ -737,7 +730,7 @@ class TestStorageDataLoss(ClusterTestBase):
         assert not exception_messages, "\n".join(exception_messages)
 
     @allure.title(
-        "{s3_client}: Loss of one node should trigger use of tree and storage service in another node"
+        "Loss of one node should trigger use of tree and storage service in another node (s3_client={s3_client})"
     )
     def test_s3_one_endpoint_loss(
         self,
@@ -763,7 +756,9 @@ class TestStorageDataLoss(ClusterTestBase):
         put_object = s3_client.put_object(bucket, file_path)
         s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
 
-    @allure.title("{s3_client}: After Pilorama.db loss on one node object are retrievable")
+    @allure.title(
+        "After Pilorama.db loss on one node object is retrievable (s3_client={s3_client})"
+    )
     def test_s3_one_pilorama_loss(
         self,
         s3_client: S3ClientWrapper,

View file

@@ -225,7 +225,7 @@ class TestNodeManagement(ClusterTestBase):
         ],
     )
     @pytest.mark.node_mgmt
-    @allure.title("Object should have {expected_copies} copies with policy {placement_rule}")
+    @allure.title("Placement policy (copies={expected_copies}, policy={placement_rule})")
     def test_placement_policy(
         self, default_wallet, placement_rule, expected_copies, simple_object_size: ObjectSize
     ):
@@ -286,9 +286,7 @@ class TestNodeManagement(ClusterTestBase):
         ],
     )
     @pytest.mark.node_mgmt
-    @allure.title(
-        "Object should have copies on nodes {expected_nodes_id} with policy {placement_rule}"
-    )
+    @allure.title("Placement policy (nodes_id={expected_nodes_id}, policy={placement_rule})")
     def test_placement_policy_with_nodes(
         self,
         default_wallet,
@@ -318,7 +316,7 @@ class TestNodeManagement(ClusterTestBase):
         ],
     )
     @pytest.mark.node_mgmt
-    @allure.title("[NEGATIVE] Placement policy: {placement_rule}")
+    @allure.title("[NEGATIVE] Placement policy (policy={placement_rule})")
    def test_placement_policy_negative(
         self, default_wallet, placement_rule, expected_copies, simple_object_size: ObjectSize
     ):
@@ -331,7 +329,7 @@ class TestNodeManagement(ClusterTestBase):
         self.validate_object_copies(wallet, placement_rule, file_path, expected_copies)
 
     @pytest.mark.node_mgmt
-    @allure.title("FrostFS object could be dropped using control command")
+    @allure.title("Drop object using control command")
     def test_drop_object(
         self, default_wallet, complex_object_size: ObjectSize, simple_object_size: ObjectSize
     ):

View file

@@ -29,7 +29,6 @@ from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils.file_utils import generate_file, get_file_content, get_file_hash
-from pytest import FixtureRequest
 
 logger = logging.getLogger("NeoLogger")
 
@@ -90,18 +89,15 @@ def generate_ranges(
 
 @pytest.fixture(
-    params=[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
-    ids=["simple object size", "complex object size"],
     # Scope session to upload/delete each files set only once
-    scope="module",
+    scope="module"
 )
 def storage_objects(
-    default_wallet: str, client_shell: Shell, cluster: Cluster, request: FixtureRequest
+    default_wallet: str, client_shell: Shell, cluster: Cluster, object_size: ObjectSize
 ) -> list[StorageObjectInfo]:
     wallet = default_wallet
     # Separate containers for complex/simple objects to avoid side-effects
     cid = create_container(wallet, shell=client_shell, endpoint=cluster.default_rpc_endpoint)
-    object_size: ObjectSize = request.param
     file_path = generate_file(object_size.value)
     file_hash = get_file_hash(file_path)
 
@@ -138,19 +134,15 @@ def storage_objects(
 
 @pytest.mark.sanity
 @pytest.mark.grpc_api
 class TestObjectApi(ClusterTestBase):
-    @allure.title("Validate object storage policy by native API for {storage_objects}")
+    @allure.title("Storage policy by native API (obj_size={object_size})")
     def test_object_storage_policies(
         self,
-        request: FixtureRequest,
         storage_objects: list[StorageObjectInfo],
         simple_object_size: ObjectSize,
     ):
         """
         Validate object storage policy
         """
-        allure.dynamic.title(
-            f"Validate object storage policy by native API for {request.node.callspec.id}"
-        )
 
         with allure.step("Validate storage policy for objects"):
             for storage_object in storage_objects:
@@ -172,14 +164,11 @@ class TestObjectApi(ClusterTestBase):
             )
             assert copies == 2, "Expected 2 copies"
 
-    @allure.title("Validate get object native API for {storage_objects}")
-    def test_get_object_api(
-        self, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
-    ):
+    @allure.title("Get object by native API (obj_size={object_size})")
+    def test_get_object_api(self, storage_objects: list[StorageObjectInfo]):
         """
         Validate get object native API
         """
-        allure.dynamic.title(f"Validate get object native API for {request.node.callspec.id}")
 
         with allure.step("Get objects and compare hashes"):
             for storage_object in storage_objects:
@@ -193,14 +182,11 @@ class TestObjectApi(ClusterTestBase):
             file_hash = get_file_hash(file_path)
             assert storage_object.file_hash == file_hash
 
-    @allure.title("Validate head object native API for {storage_objects}")
-    def test_head_object_api(
-        self, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
-    ):
+    @allure.title("Head object by native API (obj_size={object_size})")
+    def test_head_object_api(self, storage_objects: list[StorageObjectInfo]):
         """
         Validate head object native API
         """
-        allure.dynamic.title(f"Validate head object by native API for {request.node.callspec.id}")
 
         storage_object_1 = storage_objects[0]
         storage_object_2 = storage_objects[1]
@@ -222,14 +208,11 @@ class TestObjectApi(ClusterTestBase):
             )
         self.check_header_is_presented(head_info, storage_object_2.attributes)
 
-    @allure.title("Validate object search by native API for {storage_objects}")
-    def test_search_object_api(
-        self, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
-    ):
+    @allure.title("Search objects by native API (obj_size={object_size})")
+    def test_search_object_api(self, storage_objects: list[StorageObjectInfo]):
         """
         Validate object search by native API
         """
-        allure.dynamic.title(f"Validate object search by native API for {request.node.callspec.id}")
 
         oids = [storage_object.oid for storage_object in storage_objects]
         wallet = storage_objects[0].wallet_file_path
@@ -266,12 +249,7 @@ class TestObjectApi(ClusterTestBase):
         )
         assert sorted(expected_oids) == sorted(result)
 
-    @allure.title("Validate object search with removed items for {object_size}")
-    @pytest.mark.parametrize(
-        "object_size",
-        [pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
-        ids=["simple object size", "complex object size"],
-    )
+    @allure.title("Search objects with removed items (obj_size={object_size})")
     def test_object_search_should_return_tombstone_items(
         self, default_wallet: str, object_size: ObjectSize
     ):
@@ -336,18 +314,13 @@ class TestObjectApi(ClusterTestBase):
                 object_type == "TOMBSTONE"
             ), f"Object wasn't deleted properly. Found object {tombstone_oid} with type {object_type}"
 
-    @allure.title("Validate native get_range_hash object API for {storage_objects}")
+    @allure.title("Get range hash by native API (obj_size={object_size})")
     @pytest.mark.sanity
     @pytest.mark.grpc_api
-    def test_object_get_range_hash(
-        self, request: FixtureRequest, storage_objects: list[StorageObjectInfo], max_object_size
-    ):
+    def test_object_get_range_hash(self, storage_objects: list[StorageObjectInfo], max_object_size):
         """
         Validate get_range_hash for object by native gRPC API
         """
-        allure.dynamic.title(
-            f"Validate native get_range_hash object API for {request.node.callspec.id}"
-        )
 
         wallet = storage_objects[0].wallet_file_path
         cid = storage_objects[0].cid
@@ -375,16 +348,13 @@ class TestObjectApi(ClusterTestBase):
             get_file_hash(file_path, range_len, range_start) == range_hash
         ), f"Expected range hash to match {range_cut} slice of file payload"
 
-    @allure.title("Validate native get_range object API for {storage_objects}")
+    @allure.title("Get range by native API (obj_size={object_size})")
     @pytest.mark.sanity
     @pytest.mark.grpc_api
-    def test_object_get_range(
-        self, request: FixtureRequest, storage_objects: list[StorageObjectInfo], max_object_size
-    ):
+    def test_object_get_range(self, storage_objects: list[StorageObjectInfo], max_object_size):
         """
         Validate get_range for object by native gRPC API
         """
-        allure.dynamic.title(f"Validate native get_range object API for {request.node.callspec.id}")
 
         wallet = storage_objects[0].wallet_file_path
         cid = storage_objects[0].cid
@@ -415,22 +385,16 @@ class TestObjectApi(ClusterTestBase):
                 == range_content
             ), f"Expected range content to match {range_cut} slice of file payload"
 
-    @allure.title(
-        "[NEGATIVE] Invalid range in get_range native object API should return error for {storage_objects}"
-    )
+    @allure.title("[NEGATIVE] Get invalid range by native API (obj_size={object_size})")
     @pytest.mark.sanity
     @pytest.mark.grpc_api
     def test_object_get_range_negatives(
         self,
-        request: FixtureRequest,
         storage_objects: list[StorageObjectInfo],
     ):
         """
         Validate get_range negative for object by native gRPC API
         """
-        allure.dynamic.title(
-            f"[NEGATIVE] Invalid range in get_range native object API should return error for {request.node.callspec.id}"
-        )
 
         wallet = storage_objects[0].wallet_file_path
         cid = storage_objects[0].cid
@@ -474,20 +438,14 @@ class TestObjectApi(ClusterTestBase):
                     range_cut=range_cut,
                 )
 
-    @allure.title(
-        "[NEGATIVE] Invalid range in get_range_hash native object API should return error for {storage_objects}"
-    )
+    @allure.title("[NEGATIVE] Get invalid range hash by native API (obj_size={object_size})")
     def test_object_get_range_hash_negatives(
         self,
-        request: FixtureRequest,
         storage_objects: list[StorageObjectInfo],
     ):
         """
         Validate get_range_hash negative for object by native gRPC API
         """
-        allure.dynamic.title(
-            f"[NEGATIVE] Invalid range in get_range_hash native object API should return error for {request.node.callspec.id}"
-        )
 
         wallet = storage_objects[0].wallet_file_path
         cid = storage_objects[0].cid

View file

@@ -64,13 +64,12 @@ def user_container(
 def storage_objects(
     user_container: StorageContainer,
     bearer_token_file_all_allow: str,
-    request: FixtureRequest,
+    object_size: ObjectSize,
     client_shell: Shell,
     cluster: Cluster,
 ) -> list[StorageObjectInfo]:
     epoch = get_epoch(client_shell, cluster)
     storage_objects: list[StorageObjectInfo] = []
-    object_size: ObjectSize = request.param
     for node in cluster.storage_nodes:
         storage_objects.append(
             user_container.generate_object(
@@ -87,27 +86,18 @@ def storage_objects(
 @pytest.mark.bearer
 class TestObjectApiWithBearerToken(ClusterTestBase):
     @allure.title(
-        "Object can be deleted from any node using s3gate wallet with bearer token for {storage_objects}"
+        "Object can be deleted from any node using s3gate wallet with bearer token (obj_size={object_size})"
     )
     @pytest.mark.parametrize(
-        "storage_objects,user_container",
-        [
-            (pytest.lazy_fixture("simple_object_size"), SINGLE_PLACEMENT_RULE),
-            (pytest.lazy_fixture("complex_object_size"), SINGLE_PLACEMENT_RULE),
-        ],
-        ids=["simple object size", "complex object size"],
+        "user_container",
+        [SINGLE_PLACEMENT_RULE],
         indirect=True,
     )
     def test_delete_object_with_s3_wallet_bearer(
         self,
         storage_objects: list[StorageObjectInfo],
         bearer_token_file_all_allow: str,
-        request: FixtureRequest,
     ):
-        allure.dynamic.title(
-            f"Object can be deleted from any node using s3gate wallet with bearer token for {request.node.callspec.id}"
-        )
 
         s3_gate_wallet = self.cluster.s3_gates[0]
         with allure.step("Try to delete each object from first storage node"):
             for storage_object in storage_objects:
@@ -123,16 +113,12 @@ class TestObjectApiWithBearerToken(ClusterTestBase):
             )
 
     @allure.title(
-        "Object can be fetched from any node using s3gate wallet with bearer token for {object_size}"
+        "Object can be fetched from any node using s3gate wallet with bearer token (obj_size={object_size})"
     )
     @pytest.mark.parametrize(
-        "object_size, user_container",
-        [
-            (pytest.lazy_fixture("simple_object_size"), REP_2_FOR_3_NODES_PLACEMENT_RULE),
-            (pytest.lazy_fixture("complex_object_size"), REP_2_FOR_3_NODES_PLACEMENT_RULE),
-        ],
-        ids=["simple object size", "complex object size"],
-        indirect=["user_container"],
+        "user_container",
+        [REP_2_FOR_3_NODES_PLACEMENT_RULE],
+        indirect=True,
     )
     def test_get_object_with_s3_wallet_bearer_from_all_nodes(
         self,

View file

@@ -22,12 +22,7 @@ logger = logging.getLogger("NeoLogger")
 
 @pytest.mark.sanity
 @pytest.mark.grpc_api
 class TestObjectApiLifetime(ClusterTestBase):
-    @allure.title("Object should be removed when lifetime expired for {object_size}")
-    @pytest.mark.parametrize(
-        "object_size",
-        [pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
-        ids=["simple object size", "complex object size"],
-    )
+    @allure.title("Object is removed when lifetime expired (obj_size={object_size})")
     def test_object_api_lifetime(self, default_wallet: str, object_size: ObjectSize):
         """
         Test object deleted after expiration epoch.

View file
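The lock suite below, like the rest of this comparison, drops per-case allure.dynamic.title calls in favour of static @allure.title templates; allure-pytest fills placeholders such as {object_size} from the test's parameters, so every parametrized case still gets its own report title. A self-contained toy example of the pattern (the test body is a stand-in):

import allure
import pytest


@allure.title("Object is removed when lifetime expired (obj_size={object_size})")
@pytest.mark.parametrize("object_size", ["simple", "complex"])
def test_title_template_example(object_size: str):
    # Reported as "... (obj_size=simple)" and "... (obj_size=complex)".
    assert object_size in ("simple", "complex")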

@ -35,7 +35,6 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises, wait_for_success from frostfs_testlib.testing.test_control import expect_not_raises, wait_for_success
from frostfs_testlib.utils import datetime_utils from frostfs_testlib.utils import datetime_utils
from pytest import FixtureRequest
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
@ -71,12 +70,11 @@ def locked_storage_object(
user_container: StorageContainer, user_container: StorageContainer,
client_shell: Shell, client_shell: Shell,
cluster: Cluster, cluster: Cluster,
request: FixtureRequest, object_size: ObjectSize,
): ):
""" """
Intention of this fixture is to provide storage object which is NOT expected to be deleted during test act phase Intention of this fixture is to provide storage object which is NOT expected to be deleted during test act phase
""" """
object_size: ObjectSize = request.param
with allure.step("Creating locked object"): with allure.step("Creating locked object"):
current_epoch = ensure_fresh_epoch(client_shell, cluster) current_epoch = ensure_fresh_epoch(client_shell, cluster)
expiration_epoch = current_epoch + FIXTURE_LOCK_LIFETIME expiration_epoch = current_epoch + FIXTURE_LOCK_LIFETIME
@ -127,7 +125,9 @@ def locked_storage_object(
@wait_for_success(datetime_utils.parse_time(STORAGE_GC_TIME)) @wait_for_success(datetime_utils.parse_time(STORAGE_GC_TIME))
def check_object_not_found(wallet_file_path: str, cid: str, oid: str, shell: Shell, rpc_endpoint: str): def check_object_not_found(
wallet_file_path: str, cid: str, oid: str, shell: Shell, rpc_endpoint: str
):
with pytest.raises(Exception, match=OBJECT_NOT_FOUND): with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
head_object( head_object(
wallet_file_path, wallet_file_path,
@ -137,7 +137,10 @@ def check_object_not_found(wallet_file_path: str, cid: str, oid: str, shell: She
rpc_endpoint, rpc_endpoint,
) )
def verify_object_available(wallet_file_path: str, cid: str, oid: str, shell: Shell, rpc_endpoint: str):
def verify_object_available(
wallet_file_path: str, cid: str, oid: str, shell: Shell, rpc_endpoint: str
):
with expect_not_raises(): with expect_not_raises():
head_object( head_object(
wallet_file_path, wallet_file_path,
@ -147,18 +150,18 @@ def verify_object_available(wallet_file_path: str, cid: str, oid: str, shell: Sh
rpc_endpoint, rpc_endpoint,
) )
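check_object_not_found above is wrapped in wait_for_success with the parsed STORAGE_GC_TIME, so the negative HEAD check is retried until the garbage collector has actually removed the object. The real decorator lives in frostfs_testlib.testing.test_control; the sketch below only illustrates the retry idea and assumes a (max_wait_time, interval) signature.

import time
from functools import wraps


def wait_for_success(max_wait_time: int, interval: int = 1):
    # Retry the wrapped callable until it stops raising or the deadline passes.
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            deadline = time.monotonic() + max_wait_time
            while True:
                try:
                    return func(*args, **kwargs)
                except Exception:
                    if time.monotonic() > deadline:
                        raise
                    time.sleep(interval)

        return wrapper

    return decorator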
@pytest.mark.sanity @pytest.mark.sanity
@pytest.mark.grpc_object_lock @pytest.mark.grpc_object_lock
class TestObjectLockWithGrpc(ClusterTestBase): class TestObjectLockWithGrpc(ClusterTestBase):
@pytest.fixture() @pytest.fixture()
def new_locked_storage_object( def new_locked_storage_object(
self, user_container: StorageContainer, request: FixtureRequest self, user_container: StorageContainer, object_size: ObjectSize
) -> StorageObjectInfo: ) -> StorageObjectInfo:
""" """
Intention of this fixture is to provide a new storage object for tests which may delete or corrupt the object or its complementary objects Intention of this fixture is to provide a new storage object for tests which may delete or corrupt the object or its complementary objects
So we need a new one each time we ask for it So we need a new one each time we ask for it
""" """
object_size: ObjectSize = request.param
with allure.step("Creating locked object"): with allure.step("Creating locked object"):
current_epoch = self.get_epoch() current_epoch = self.get_epoch()
@ -176,25 +179,14 @@ class TestObjectLockWithGrpc(ClusterTestBase):
return storage_object return storage_object
@allure.title("Locked object should be protected from deletion for {locked_storage_object}") @allure.title("Locked object is protected from deletion (obj_size={object_size})")
@pytest.mark.parametrize(
"locked_storage_object",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object size", "complex object size"],
indirect=True,
)
def test_locked_object_cannot_be_deleted( def test_locked_object_cannot_be_deleted(
self, self,
request: FixtureRequest,
locked_storage_object: StorageObjectInfo, locked_storage_object: StorageObjectInfo,
): ):
""" """
Locked object should be protected from deletion Locked object should be protected from deletion
""" """
allure.dynamic.title(
f"Locked object should be protected from deletion for {request.node.callspec.id}"
)
with pytest.raises(Exception, match=OBJECT_IS_LOCKED): with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
delete_object( delete_object(
locked_storage_object.wallet_file_path, locked_storage_object.wallet_file_path,
@ -204,11 +196,9 @@ class TestObjectLockWithGrpc(ClusterTestBase):
self.cluster.default_rpc_endpoint, self.cluster.default_rpc_endpoint,
) )
@allure.title("Lock object itself should be protected from deletion") @allure.title("Lock object itself is protected from deletion")
# We operate with only lock object here so no complex object needed in this test # We operate with only lock object here so no complex object needed in this test
@pytest.mark.parametrize( @pytest.mark.parametrize("object_size", ["simple"], indirect=True)
"locked_storage_object", [pytest.lazy_fixture("simple_object_size")], indirect=True
)
def test_lock_object_itself_cannot_be_deleted( def test_lock_object_itself_cannot_be_deleted(
self, self,
locked_storage_object: StorageObjectInfo, locked_storage_object: StorageObjectInfo,
@ -231,9 +221,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
@allure.title("Lock object itself cannot be locked") @allure.title("Lock object itself cannot be locked")
# We operate with only lock object here so no complex object needed in this test # We operate with only lock object here so no complex object needed in this test
@pytest.mark.parametrize( @pytest.mark.parametrize("object_size", ["simple"], indirect=True)
"locked_storage_object", [pytest.lazy_fixture("simple_object_size")], indirect=True
)
def test_lock_object_cannot_be_locked( def test_lock_object_cannot_be_locked(
self, self,
locked_storage_object: StorageObjectInfo, locked_storage_object: StorageObjectInfo,
@ -259,9 +247,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
"Lock must contain valid lifetime or expire_at field: (lifetime={wrong_lifetime}, expire-at={wrong_expire_at})" "Lock must contain valid lifetime or expire_at field: (lifetime={wrong_lifetime}, expire-at={wrong_expire_at})"
) )
# We operate with only lock object here so no complex object needed in this test # We operate with only lock object here so no complex object needed in this test
@pytest.mark.parametrize( @pytest.mark.parametrize("object_size", ["simple"], indirect=True)
"locked_storage_object", [pytest.lazy_fixture("simple_object_size")], indirect=True
)
@pytest.mark.parametrize( @pytest.mark.parametrize(
"wrong_lifetime,wrong_expire_at,expected_error", "wrong_lifetime,wrong_expire_at,expected_error",
[ [
@ -298,12 +284,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
expire_at=wrong_expire_at, expire_at=wrong_expire_at,
) )
@allure.title("Expired object should be deleted after locks are expired for {object_size}") @allure.title("Expired object is deleted when locks are expired (obj_size={object_size})")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object size", "complex object size"],
)
def test_expired_object_should_be_deleted_after_locks_are_expired( def test_expired_object_should_be_deleted_after_locks_are_expired(
self, self,
user_container: StorageContainer, user_container: StorageContainer,
@ -351,18 +332,15 @@ class TestObjectLockWithGrpc(ClusterTestBase):
with allure.step("Wait for object to be deleted after third epoch"): with allure.step("Wait for object to be deleted after third epoch"):
self.tick_epoch() self.tick_epoch()
check_object_not_found(storage_object.wallet_file_path, check_object_not_found(
storage_object.cid, storage_object.wallet_file_path,
storage_object.oid, storage_object.cid,
self.shell, storage_object.oid,
self.cluster.default_rpc_endpoint) self.shell,
self.cluster.default_rpc_endpoint,
)
@allure.title("Should be possible to lock multiple objects at once for {object_size}") @allure.title("Lock multiple objects at once (obj_size={object_size})")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object size", "complex object size"],
)
def test_should_be_possible_to_lock_multiple_objects_at_once( def test_should_be_possible_to_lock_multiple_objects_at_once(
self, self,
user_container: StorageContainer, user_container: StorageContainer,
@ -408,12 +386,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
with expect_not_raises(): with expect_not_raises():
delete_objects(storage_objects, self.shell, self.cluster) delete_objects(storage_objects, self.shell, self.cluster)
@allure.title("Already outdated lock should not be applied for {object_size}") @allure.title("Outdated lock cannot be applied (obj_size={object_size})")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object size", "complex object size"],
)
def test_already_outdated_lock_should_not_be_applied( def test_already_outdated_lock_should_not_be_applied(
self, self,
user_container: StorageContainer, user_container: StorageContainer,
@ -445,14 +418,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
expire_at=expiration_epoch, expire_at=expiration_epoch,
) )
@allure.title( @allure.title("Delete object when lock is expired by lifetime (obj_size={object_size})")
"After lock expiration with lifetime user should be able to delete object for {object_size}"
)
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object size", "complex object size"],
)
@expect_not_raises() @expect_not_raises()
def test_after_lock_expiration_with_lifetime_user_should_be_able_to_delete_object( def test_after_lock_expiration_with_lifetime_user_should_be_able_to_delete_object(
self, self,
@ -487,14 +453,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
self.cluster.default_rpc_endpoint, self.cluster.default_rpc_endpoint,
) )
@allure.title( @allure.title("Delete object when lock is expired by expire_at (obj_size={object_size})")
"After lock expiration with expire_at user should be able to delete object for {object_size}"
)
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object size", "complex object size"],
)
@expect_not_raises() @expect_not_raises()
def test_after_lock_expiration_with_expire_at_user_should_be_able_to_delete_object( def test_after_lock_expiration_with_expire_at_user_should_be_able_to_delete_object(
self, self,
@ -531,11 +490,11 @@ class TestObjectLockWithGrpc(ClusterTestBase):
self.cluster.default_rpc_endpoint, self.cluster.default_rpc_endpoint,
) )
@allure.title("Complex object chunks should also be protected from deletion") @allure.title("Complex object chunks are protected from deletion")
@pytest.mark.parametrize( @pytest.mark.parametrize(
# Only complex objects are required for this test # Only complex objects are required for this test
"locked_storage_object", "object_size",
[pytest.lazy_fixture("complex_object_size")], ["complex"],
indirect=True, indirect=True,
) )
def test_complex_object_chunks_should_also_be_protected_from_deletion( def test_complex_object_chunks_should_also_be_protected_from_deletion(
@ -560,12 +519,12 @@ class TestObjectLockWithGrpc(ClusterTestBase):
self.cluster.default_rpc_endpoint, self.cluster.default_rpc_endpoint,
) )
@allure.title("Link object of locked complex object can be dropped") @allure.title("Drop link object of locked complex object")
@pytest.mark.grpc_control @pytest.mark.grpc_control
@pytest.mark.parametrize( @pytest.mark.parametrize(
"new_locked_storage_object", "object_size",
# Only complex object is required # Only complex object is required
[pytest.lazy_fixture("complex_object_size")], ["complex"],
indirect=True, indirect=True,
) )
def test_link_object_of_locked_complex_object_can_be_dropped( def test_link_object_of_locked_complex_object_can_be_dropped(
@ -590,12 +549,12 @@ class TestObjectLockWithGrpc(ClusterTestBase):
with expect_not_raises(): with expect_not_raises():
drop_object(node, new_locked_storage_object.cid, link_object_id) drop_object(node, new_locked_storage_object.cid, link_object_id)
@allure.title("Chunks of locked complex object can be dropped") @allure.title("Drop chunks of locked complex object")
@pytest.mark.grpc_control @pytest.mark.grpc_control
@pytest.mark.parametrize( @pytest.mark.parametrize(
"new_locked_storage_object", "object_size",
# Only complex object is required # Only complex object is required
[pytest.lazy_fixture("complex_object_size")], ["complex"],
indirect=True, indirect=True,
) )
def test_chunks_of_locked_complex_object_can_be_dropped( def test_chunks_of_locked_complex_object_can_be_dropped(
@ -617,18 +576,9 @@ class TestObjectLockWithGrpc(ClusterTestBase):
with expect_not_raises(): with expect_not_raises():
drop_object(node, new_locked_storage_object.cid, chunk_object_id) drop_object(node, new_locked_storage_object.cid, chunk_object_id)
@allure.title("Locked object with {new_locked_storage_object} can be dropped") @allure.title("Drop locked object (obj_size={object_size})")
@pytest.mark.grpc_control @pytest.mark.grpc_control
@pytest.mark.parametrize( def test_locked_object_can_be_dropped(self, new_locked_storage_object: StorageObjectInfo):
"new_locked_storage_object",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object size", "complex object size"],
indirect=True,
)
def test_locked_object_can_be_dropped(
self, new_locked_storage_object: StorageObjectInfo, request: pytest.FixtureRequest
):
allure.dynamic.title(f"Locked {request.node.callspec.id} can be dropped")
nodes_with_object = get_nodes_with_object( nodes_with_object = get_nodes_with_object(
new_locked_storage_object.cid, new_locked_storage_object.cid,
new_locked_storage_object.oid, new_locked_storage_object.oid,
@ -640,11 +590,11 @@ class TestObjectLockWithGrpc(ClusterTestBase):
with expect_not_raises(): with expect_not_raises():
drop_object(node, new_locked_storage_object.cid, new_locked_storage_object.oid) drop_object(node, new_locked_storage_object.cid, new_locked_storage_object.oid)
@allure.title("Link object of complex object should also be protected from deletion") @allure.title("Link object of complex object is protected from deletion")
@pytest.mark.parametrize( @pytest.mark.parametrize(
# Only complex objects are required for this test # Only complex objects are required for this test
"locked_storage_object", "object_size",
[pytest.lazy_fixture("complex_object_size")], ["complex"],
indirect=True, indirect=True,
) )
def test_link_object_of_complex_object_should_also_be_protected_from_deletion( def test_link_object_of_complex_object_should_also_be_protected_from_deletion(
@ -673,25 +623,16 @@ class TestObjectLockWithGrpc(ClusterTestBase):
self.cluster.default_rpc_endpoint, self.cluster.default_rpc_endpoint,
) )
@allure.title("Expired object should be removed after all locks were expired for {object_size}") @allure.title("Expired object is removed after all locks are expired (obj_size={object_size})")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object size", "complex object size"],
)
def test_expired_object_should_be_removed_after_relocks_expare_at( def test_expired_object_should_be_removed_after_relocks_expare_at(
self, self,
request: FixtureRequest,
user_container: StorageContainer, user_container: StorageContainer,
object_size: ObjectSize, object_size: ObjectSize,
): ):
allure.dynamic.title(
f"Expired object should be removed after all locks were expired for {request.node.callspec.id}"
)
current_epoch = self.ensure_fresh_epoch() current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size.value, expire_at=current_epoch + 1) storage_object = user_container.generate_object(
object_size.value, expire_at=current_epoch + 1
)
with allure.step("Apply first lock to object for 3 epochs"): with allure.step("Apply first lock to object for 3 epochs"):
lock_object_id_0 = lock_object( lock_object_id_0 = lock_object(
@ -711,7 +652,8 @@ class TestObjectLockWithGrpc(ClusterTestBase):
storage_object.cid, storage_object.cid,
lock_object_id_0, lock_object_id_0,
self.shell, self.shell,
self.cluster.default_rpc_endpoint) self.cluster.default_rpc_endpoint,
)
with allure.step("Apply second lock to object for 3 more epochs"): with allure.step("Apply second lock to object for 3 more epochs"):
lock_object_id_1 = lock_object( lock_object_id_1 = lock_object(
@ -731,8 +673,8 @@ class TestObjectLockWithGrpc(ClusterTestBase):
storage_object.cid, storage_object.cid,
lock_object_id_0, lock_object_id_0,
self.shell, self.shell,
self.cluster.default_rpc_endpoint self.cluster.default_rpc_endpoint,
) )
with allure.step("Verify second lock is still available"): with allure.step("Verify second lock is still available"):
verify_object_available( verify_object_available(
@ -740,8 +682,8 @@ class TestObjectLockWithGrpc(ClusterTestBase):
storage_object.cid, storage_object.cid,
lock_object_id_1, lock_object_id_1,
self.shell, self.shell,
self.cluster.default_rpc_endpoint self.cluster.default_rpc_endpoint,
) )
with allure.step("Apply third lock to object for 3 more epochs"): with allure.step("Apply third lock to object for 3 more epochs"):
lock_object( lock_object(
@ -760,33 +702,27 @@ class TestObjectLockWithGrpc(ClusterTestBase):
storage_object.cid, storage_object.cid,
storage_object.oid, storage_object.oid,
self.shell, self.shell,
self.cluster.default_rpc_endpoint self.cluster.default_rpc_endpoint,
) )
@allure.title("Two expired objects with one lock should be deleted after lock expiration for {object_size}")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object size", "complex object size"],
)
@allure.title(
"Two expired objects with one lock are deleted after lock expiration (obj_size={object_size})"
)
def test_two_objects_expiration_with_one_lock( def test_two_objects_expiration_with_one_lock(
self, self,
request: FixtureRequest,
user_container: StorageContainer, user_container: StorageContainer,
object_size: ObjectSize, object_size: ObjectSize,
): ):
allure.dynamic.title(
f"Two expired objects with one lock should be deleted after lock expiration for {request.node.callspec.id}"
)
current_epoch = self.ensure_fresh_epoch() current_epoch = self.ensure_fresh_epoch()
storage_objects: list[StorageObjectInfo] = [] storage_objects: list[StorageObjectInfo] = []
with allure.step("Generate two objects"): with allure.step("Generate two objects"):
for epoch_i in range(2): for epoch_i in range(2):
storage_objects.append( storage_objects.append(
user_container.generate_object(object_size.value, expire_at=current_epoch + epoch_i + 3) user_container.generate_object(
object_size.value, expire_at=current_epoch + epoch_i + 3
)
) )
self.tick_epoch() self.tick_epoch()
@ -803,16 +739,16 @@ class TestObjectLockWithGrpc(ClusterTestBase):
with allure.step("Verify objects are available during next three epochs"): with allure.step("Verify objects are available during next three epochs"):
for epoch_i in range(3): for epoch_i in range(3):
self.tick_epoch() self.tick_epoch()
with allure.step(f"Check objects at epoch {current_epoch + epoch_i + 2}"): with allure.step(f"Check objects at epoch {current_epoch + epoch_i + 2}"):
for storage_object in storage_objects: for storage_object in storage_objects:
verify_object_available( verify_object_available(
storage_object.wallet_file_path, storage_object.wallet_file_path,
storage_object.cid, storage_object.cid,
storage_object.oid, storage_object.oid,
self.shell, self.shell,
self.cluster.default_rpc_endpoint self.cluster.default_rpc_endpoint,
) )
with allure.step("Verify objects are deleted after lock was expired"): with allure.step("Verify objects are deleted after lock was expired"):
self.tick_epoch() self.tick_epoch()
@ -822,6 +758,5 @@ class TestObjectLockWithGrpc(ClusterTestBase):
storage_object.cid, storage_object.cid,
storage_object.oid, storage_object.oid,
self.shell, self.shell,
self.cluster.default_rpc_endpoint self.cluster.default_rpc_endpoint,
) )

View file

@ -27,12 +27,7 @@ class TestReplication(ClusterTestBase):
yield yield
cluster_state_controller.start_stopped_hosts() cluster_state_controller.start_stopped_hosts()
@pytest.mark.parametrize( @allure.title("Replication (obj_size={object_size})")
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object size", "complex object size"],
)
@allure.title("Test replication for {object_size}")
def test_replication( def test_replication(
self, self,
default_wallet: str, default_wallet: str,

View file

@ -95,11 +95,6 @@ class Test_http_bearer(ClusterTestBase):
) )
@pytest.mark.skip("Temp disable for v0.37") @pytest.mark.skip("Temp disable for v0.37")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object size", "complex object size"],
)
def test_put_with_bearer_when_eacl_restrict( def test_put_with_bearer_when_eacl_restrict(
self, self,
object_size: ObjectSize, object_size: ObjectSize,

View file

@ -218,7 +218,7 @@ class TestHttpPut(ClusterTestBase):
http_hostname=self.cluster.default_http_hostname[0], http_hostname=self.cluster.default_http_hostname[0],
) )
@allure.title("Test Expiration-Epoch in HTTP header with epoch_gap={epoch_gap}") @allure.title("Expiration-Epoch in HTTP header (epoch_gap={epoch_gap})")
@pytest.mark.parametrize("epoch_gap", [0, 1]) @pytest.mark.parametrize("epoch_gap", [0, 1])
def test_expiration_epoch_in_http(self, simple_object_size: ObjectSize, epoch_gap: int): def test_expiration_epoch_in_http(self, simple_object_size: ObjectSize, epoch_gap: int):
endpoint = self.cluster.default_rpc_endpoint endpoint = self.cluster.default_rpc_endpoint
@ -283,7 +283,7 @@ class TestHttpPut(ClusterTestBase):
http_hostname=self.cluster.default_http_hostname[0], http_hostname=self.cluster.default_http_hostname[0],
) )
@allure.title("Test Zip in HTTP header") @allure.title("Zip in HTTP header")
def test_zip_in_http(self, complex_object_size: ObjectSize, simple_object_size: ObjectSize): def test_zip_in_http(self, complex_object_size: ObjectSize, simple_object_size: ObjectSize):
cid = create_container( cid = create_container(
self.wallet, self.wallet,

View file

@ -22,7 +22,6 @@ from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file from frostfs_testlib.utils.file_utils import generate_file
from pytest import FixtureRequest
OBJECT_ALREADY_REMOVED_ERROR = "object already removed" OBJECT_ALREADY_REMOVED_ERROR = "object already removed"
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@ -47,19 +46,11 @@ class Test_http_headers(ClusterTestBase):
def prepare_wallet(self, default_wallet): def prepare_wallet(self, default_wallet):
Test_http_headers.wallet = default_wallet Test_http_headers.wallet = default_wallet
@pytest.fixture(
params=[
pytest.lazy_fixture("simple_object_size"),
# TODO: Temp disable for v0.37
# pytest.lazy_fixture("complex_object_size"),
],
# TODO: Temp disable for v0.37
# ids=["simple object size", "complex object size"],
ids=["simple object size"],
scope="class",
)
def storage_objects_with_attributes(self, request: FixtureRequest) -> list[StorageObjectInfo]:
object_size: ObjectSize = request.param
def storage_objects_with_attributes(self, object_size: ObjectSize) -> list[StorageObjectInfo]:
# TODO: Deal with http tests
if object_size.value > 1000:
pytest.skip("Complex objects for HTTP temporarily disabled for v0.37")
storage_objects = [] storage_objects = []
wallet = self.wallet wallet = self.wallet
cid = create_container( cid = create_container(

View file

@ -27,12 +27,7 @@ class Test_http_object(ClusterTestBase):
def prepare_wallet(self, default_wallet): def prepare_wallet(self, default_wallet):
Test_http_object.wallet = default_wallet Test_http_object.wallet = default_wallet
@allure.title("Put over gRPC, Get over HTTP for {object_size}") @allure.title("Put over gRPC, Get over HTTP with attributes (obj_size={object_size})")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object size", "complex object size"],
)
def test_object_put_get_attributes(self, object_size: ObjectSize): def test_object_put_get_attributes(self, object_size: ObjectSize):
""" """
Test that object can be put using gRPC interface and get using HTTP. Test that object can be put using gRPC interface and get using HTTP.

View file

@ -24,13 +24,8 @@ class Test_http_streaming(ClusterTestBase):
def prepare_wallet(self, default_wallet): def prepare_wallet(self, default_wallet):
Test_http_streaming.wallet = default_wallet Test_http_streaming.wallet = default_wallet
@allure.title("Test Put via pipe (steaming), Get over HTTP and verify hashes") @allure.title("Put via pipe (streaming), Get over HTTP and verify hashes")
@pytest.mark.parametrize( def test_object_can_be_put_get_by_streaming(self, complex_object_size: ObjectSize):
"object_size",
[pytest.lazy_fixture("complex_object_size")],
ids=["complex object size"],
)
def test_object_can_be_put_get_by_streaming(self, object_size: ObjectSize):
""" """
Test that object can be put using gRPC interface and get using HTTP. Test that object can be put using gRPC interface and get using HTTP.
@ -53,7 +48,7 @@ class Test_http_streaming(ClusterTestBase):
) )
with allure.step("Allocate big object"): with allure.step("Allocate big object"):
# Generate file # Generate file
file_path = generate_file(object_size.value) file_path = generate_file(complex_object_size.value)
with allure.step( with allure.step(
"Put objects using curl utility and Get object and verify hashes [ get/$CID/$OID ]" "Put objects using curl utility and Get object and verify hashes [ get/$CID/$OID ]"

View file

@ -209,12 +209,7 @@ class Test_http_system_header(ClusterTestBase):
error_pattern=f"{EXPIRATION_EXPIRATION_RFC} must be in the future", error_pattern=f"{EXPIRATION_EXPIRATION_RFC} must be in the future",
) )
@allure.title("Priority of attributes epoch>duration for {object_size}") @allure.title("Priority of attributes epoch>duration (obj_size={object_size})")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object size", "complex object size"],
)
@pytest.mark.skip("Temp disable for v0.37") @pytest.mark.skip("Temp disable for v0.37")
def test_http_attr_priority_epoch_duration( def test_http_attr_priority_epoch_duration(
self, user_container: str, object_size: ObjectSize, epoch_duration: int self, user_container: str, object_size: ObjectSize, epoch_duration: int
@ -256,12 +251,7 @@ class Test_http_system_header(ClusterTestBase):
self.wallet, user_container, oid, self.shell, self.cluster self.wallet, user_container, oid, self.shell, self.cluster
) )
@allure.title("Priority of attributes duration>timestamp for {object_size}") @allure.title("Priority of attributes duration>timestamp (obj_size={object_size})")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object size", "complex object size"],
)
@pytest.mark.skip("Temp disable for v0.37") @pytest.mark.skip("Temp disable for v0.37")
def test_http_attr_priority_dur_timestamp( def test_http_attr_priority_dur_timestamp(
self, user_container: str, object_size: ObjectSize, epoch_duration: int self, user_container: str, object_size: ObjectSize, epoch_duration: int
@ -310,12 +300,7 @@ class Test_http_system_header(ClusterTestBase):
self.wallet, user_container, oid, self.shell, self.cluster self.wallet, user_container, oid, self.shell, self.cluster
) )
@allure.title("Priority of attributes timestamp>Expiration-RFC for {object_size}") @allure.title("Priority of attributes timestamp>Expiration-RFC (obj_size={object_size})")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object size", "complex object size"],
)
@pytest.mark.skip("Temp disable for v0.37") @pytest.mark.skip("Temp disable for v0.37")
def test_http_attr_priority_timestamp_rfc( def test_http_attr_priority_timestamp_rfc(
self, user_container: str, object_size: ObjectSize, epoch_duration: int self, user_container: str, object_size: ObjectSize, epoch_duration: int
@ -364,14 +349,12 @@ class Test_http_system_header(ClusterTestBase):
self.wallet, user_container, oid, self.shell, self.cluster self.wallet, user_container, oid, self.shell, self.cluster
) )
@allure.title("Object should be deleted when expiration passed for {object_size}") @allure.title("Object should be deleted when expiration passed (obj_size={object_size})")
@pytest.mark.parametrize( @pytest.mark.parametrize(
"object_size", "object_size",
# TODO: Temp disabled for v0.37 # TODO: "complex" temporarily disabled for v0.37
# [pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")], ["simple"],
[pytest.lazy_fixture("simple_object_size")], indirect=True,
# ids=["simple object size", "complex object size"],
ids=["simple object size"],
) )
def test_http_rfc_object_unavailable_after_expir( def test_http_rfc_object_unavailable_after_expir(
self, user_container: str, object_size: ObjectSize, epoch_duration: int self, user_container: str, object_size: ObjectSize, epoch_duration: int

View file

@ -10,7 +10,7 @@ from frostfs_testlib.utils.file_utils import generate_file
@pytest.mark.acl @pytest.mark.acl
@pytest.mark.s3_gate @pytest.mark.s3_gate
class TestS3GateACL: class TestS3GateACL:
@allure.title("{s3_client}: Object ACL") @allure.title("Object ACL (s3_client={s3_client})")
@pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True) @pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True)
def test_s3_object_ACL( def test_s3_object_ACL(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
@ -44,7 +44,7 @@ class TestS3GateACL:
obj_acl = s3_client.get_object_acl(bucket, file_name) obj_acl = s3_client.get_object_acl(bucket, file_name)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers") s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
@allure.title("{s3_client}: Bucket ACL") @allure.title("Bucket ACL (s3_client={s3_client})")
@pytest.mark.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True) @pytest.mark.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
def test_s3_bucket_ACL(self, s3_client: S3ClientWrapper): def test_s3_bucket_ACL(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket with ACL = public-read-write"): with allure.step("Create bucket with ACL = public-read-write"):

View file

@ -17,7 +17,7 @@ def pytest_generate_tests(metafunc: pytest.Metafunc):
@pytest.mark.s3_gate @pytest.mark.s3_gate
@pytest.mark.s3_gate_bucket @pytest.mark.s3_gate_bucket
class TestS3GateBucket: class TestS3GateBucket:
@allure.title("{s3_client}: Create Bucket with different ACL") @allure.title("Create Bucket with different ACL (s3_client={s3_client})")
def test_s3_create_bucket_with_ACL(self, s3_client: S3ClientWrapper): def test_s3_create_bucket_with_ACL(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket with ACL private"): with allure.step("Create bucket with ACL private"):
@ -46,7 +46,7 @@ class TestS3GateBucket:
bucket_acl_3 = s3_client.get_bucket_acl(bucket_3) bucket_acl_3 = s3_client.get_bucket_acl(bucket_3)
s3_helper.assert_s3_acl(acl_grants=bucket_acl_3, permitted_users="AllUsers") s3_helper.assert_s3_acl(acl_grants=bucket_acl_3, permitted_users="AllUsers")
@allure.title("{s3_client}: Create Bucket with different ACL by grant") @allure.title("Create Bucket with different ACL by grant (s3_client={s3_client})")
def test_s3_create_bucket_with_grands(self, s3_client: S3ClientWrapper): def test_s3_create_bucket_with_grands(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket with --grant-read"): with allure.step("Create bucket with --grant-read"):
@ -73,7 +73,7 @@ class TestS3GateBucket:
bucket_acl_2 = s3_client.get_bucket_acl(bucket_2) bucket_acl_2 = s3_client.get_bucket_acl(bucket_2)
s3_helper.assert_s3_acl(acl_grants=bucket_acl_2, permitted_users="AllUsers") s3_helper.assert_s3_acl(acl_grants=bucket_acl_2, permitted_users="AllUsers")
@allure.title("{s3_client}: create bucket with object lock") @allure.title("Create bucket with object lock (s3_client={s3_client})")
def test_s3_bucket_object_lock( def test_s3_bucket_object_lock(
self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize
): ):
@ -108,7 +108,7 @@ class TestS3GateBucket:
s3_client, bucket_1, file_name, "COMPLIANCE", date_obj_1, "ON" s3_client, bucket_1, file_name, "COMPLIANCE", date_obj_1, "ON"
) )
@allure.title("{s3_client}: delete bucket") @allure.title("Delete bucket (s3_client={s3_client})")
def test_s3_delete_bucket(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize): def test_s3_delete_bucket(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize):
file_path_1 = generate_file(simple_object_size.value) file_path_1 = generate_file(simple_object_size.value)
file_name_1 = s3_helper.object_key_from_file_path(file_path_1) file_name_1 = s3_helper.object_key_from_file_path(file_path_1)

View file
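The hunk context in this and the following S3 files shows a pytest_generate_tests hook: s3_client is parametrized for the whole module unless a test pins it explicitly, as TestS3GateACL does with @pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True). The hook itself sits outside this diff; a plausible sketch, with the import path an assumption:

import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper  # import path assumed


def pytest_generate_tests(metafunc: pytest.Metafunc):
    if "s3_client" not in metafunc.fixturenames:
        return
    # Leave tests alone if they already parametrize s3_client themselves.
    explicitly_parametrized = any(
        marker.args and marker.args[0] == "s3_client"
        for marker in metafunc.definition.iter_markers("parametrize")
    )
    if not explicitly_parametrized:
        metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)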

@ -34,7 +34,7 @@ def pytest_generate_tests(metafunc: pytest.Metafunc):
@pytest.mark.s3_gate @pytest.mark.s3_gate
@pytest.mark.s3_gate_base @pytest.mark.s3_gate_base
class TestS3Gate: class TestS3Gate:
@allure.title("{s3_client}: Bucket API") @allure.title("Bucket API (s3_client={s3_client})")
def test_s3_buckets( def test_s3_buckets(
self, self,
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
@ -107,25 +107,22 @@ class TestS3Gate:
with pytest.raises(Exception, match=r".*Not Found.*"): with pytest.raises(Exception, match=r".*Not Found.*"):
s3_client.head_bucket(bucket_1) s3_client.head_bucket(bucket_1)
@allure.title("{s3_client}: Object API for {object_size}") @allure.title("Object API (obj_size={object_size}, s3_client={s3_client})")
@pytest.mark.parametrize( @pytest.mark.parametrize(
"object_size", "object_size",
["simple object size", "complex object size"], ["simple", "complex"],
ids=["simple object size", "complex object size"], indirect=True,
) )
def test_s3_api_object( def test_s3_api_object(
self, self,
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
object_size: str, object_size: ObjectSize,
two_buckets: tuple[str, str], two_buckets: tuple[str, str],
simple_object_size: ObjectSize,
complex_object_size: ObjectSize,
): ):
""" """
Test base S3 Object API (Put/Head/List) for simple and complex objects. Test base S3 Object API (Put/Head/List) for simple and complex objects.
""" """
size = simple_object_size if object_size == "simple object size" else complex_object_size file_path = generate_file(object_size.value)
file_path = generate_file(size.value)
file_name = s3_helper.object_key_from_file_path(file_path) file_name = s3_helper.object_key_from_file_path(file_path)
bucket_1, bucket_2 = two_buckets bucket_1, bucket_2 = two_buckets
@ -147,7 +144,7 @@ class TestS3Gate:
for attrs in (["ETag"], ["ObjectSize", "StorageClass"]): for attrs in (["ETag"], ["ObjectSize", "StorageClass"]):
s3_client.get_object_attributes(bucket, file_name, attrs) s3_client.get_object_attributes(bucket, file_name, attrs)
@allure.title("{s3_client}: Sync directory") @allure.title("Sync directory (s3_client={s3_client})")
def test_s3_sync_dir( def test_s3_sync_dir(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
): ):
@ -179,7 +176,7 @@ class TestS3Gate:
key_to_path.get(obj_key) key_to_path.get(obj_key)
), "Expected hashes are the same" ), "Expected hashes are the same"
@allure.title("{s3_client}: Object versioning") @allure.title("Object versioning (s3_client={s3_client})")
def test_s3_api_versioning( def test_s3_api_versioning(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
): ):
@ -259,7 +256,7 @@ class TestS3Gate:
), f"Expected object content is\n{version_2_content}\nGot\n{got_content}" ), f"Expected object content is\n{version_2_content}\nGot\n{got_content}"
@pytest.mark.s3_gate_multipart @pytest.mark.s3_gate_multipart
@allure.title("{s3_client}: Object Multipart API") @allure.title("Object Multipart API (s3_client={s3_client})")
def test_s3_api_multipart( def test_s3_api_multipart(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
): ):
@ -316,7 +313,7 @@ class TestS3Gate:
self.check_object_attributes(s3_client, bucket, object_key, parts_count) self.check_object_attributes(s3_client, bucket, object_key, parts_count)
@allure.title("{s3_client}: Bucket tagging API") @allure.title("Bucket tagging API (s3_client={s3_client})")
def test_s3_api_bucket_tagging(self, s3_client: S3ClientWrapper, bucket: str): def test_s3_api_bucket_tagging(self, s3_client: S3ClientWrapper, bucket: str):
""" """
Test checks S3 Bucket tagging API (Put tag/Get tag). Test checks S3 Bucket tagging API (Put tag/Get tag).
@ -329,7 +326,7 @@ class TestS3Gate:
s3_client.delete_bucket_tagging(bucket) s3_client.delete_bucket_tagging(bucket)
s3_helper.check_tags_by_bucket(s3_client, bucket, []) s3_helper.check_tags_by_bucket(s3_client, bucket, [])
@allure.title("{s3_client}: Object tagging API") @allure.title("Object tagging API (s3_client={s3_client})")
def test_s3_api_object_tagging( def test_s3_api_object_tagging(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
): ):
@ -361,7 +358,7 @@ class TestS3Gate:
s3_client.delete_object_tagging(bucket, obj_key) s3_client.delete_object_tagging(bucket, obj_key)
s3_helper.check_tags_by_object(s3_client, bucket, obj_key, []) s3_helper.check_tags_by_object(s3_client, bucket, obj_key, [])
@allure.title("{s3_client}: Delete object & delete objects") @allure.title("Delete object & delete objects (s3_client={s3_client})")
def test_s3_api_delete( def test_s3_api_delete(
self, self,
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
@ -427,7 +424,7 @@ class TestS3Gate:
with pytest.raises(Exception, match="The specified key does not exist"): with pytest.raises(Exception, match="The specified key does not exist"):
s3_client.get_object(bucket_2, object_key) s3_client.get_object(bucket_2, object_key)
@allure.title("{s3_client}: Copy object to the same bucket") @allure.title("Copy object to the same bucket (s3_client={s3_client})")
def test_s3_copy_same_bucket( def test_s3_copy_same_bucket(
self, self,
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
@ -476,7 +473,7 @@ class TestS3Gate:
unexpected_objects=[file_name_simple], unexpected_objects=[file_name_simple],
) )
@allure.title("{s3_client}: Copy object to another bucket") @allure.title("Copy object to another bucket (s3_client={s3_client})")
def test_s3_copy_to_another_bucket( def test_s3_copy_to_another_bucket(
self, self,
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,

View file
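The locking tests below hand a retention structure to s3_client.put_object_retention. The wrapper's exact types are outside this diff; the sketch shows the standard S3 retention payload shape such a call is assumed to forward (COMPLIANCE cannot be weakened later, GOVERNANCE can be lifted with bypass permission).

from datetime import datetime, timedelta, timezone

# Assumed payload shape (mirrors the S3 API's Retention structure):
retention = {
    "Mode": "COMPLIANCE",  # or "GOVERNANCE"
    "RetainUntilDate": datetime.now(timezone.utc) + timedelta(days=1),
}
# Used below as: s3_client.put_object_retention(bucket, file_name, retention, version_id)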

@ -20,7 +20,7 @@ def pytest_generate_tests(metafunc: pytest.Metafunc):
@pytest.mark.parametrize("version_id", [None, "second"]) @pytest.mark.parametrize("version_id", [None, "second"])
class TestS3GateLocking: class TestS3GateLocking:
@allure.title( @allure.title(
"{s3_client}: Retention period & legal lock on the object with version_id={version_id}" "Retention period and legal lock on object (version_id={version_id}, s3_client={s3_client})"
) )
def test_s3_object_locking( def test_s3_object_locking(
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize
@ -77,7 +77,7 @@ class TestS3GateLocking:
s3_client.delete_object(bucket, file_name, version_id) s3_client.delete_object(bucket, file_name, version_id)
@allure.title( @allure.title(
"{s3_client}: Impossible to change the retention mode COMPLIANCE with version_id={version_id}" "Impossible to change retention mode COMPLIANCE (version_id={version_id}, s3_client={s3_client})"
) )
def test_s3_mode_compliance( def test_s3_mode_compliance(
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize
@ -117,7 +117,9 @@ class TestS3GateLocking:
with pytest.raises(Exception): with pytest.raises(Exception):
s3_client.put_object_retention(bucket, file_name, retention, version_id) s3_client.put_object_retention(bucket, file_name, retention, version_id)
@allure.title("{s3_client}: Change retention mode GOVERNANCE with version_id={version_id}") @allure.title(
"Change retention mode GOVERNANCE (version_id={version_id}, s3_client={s3_client})"
)
def test_s3_mode_governance( def test_s3_mode_governance(
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize
): ):
@ -179,7 +181,9 @@ class TestS3GateLocking:
s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF" s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF"
) )
@allure.title("{s3_client}: Object Cannot Be Locked with version_id={version_id}") @allure.title(
"[NEGATIVE] Lock object in bucket with disabled locking (version_id={version_id}, s3_client={s3_client})"
)
def test_s3_legal_hold( def test_s3_legal_hold(
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize
): ):
@ -201,7 +205,7 @@ class TestS3GateLocking:
@pytest.mark.s3_gate @pytest.mark.s3_gate
class TestS3GateLockingBucket: class TestS3GateLockingBucket:
@allure.title("{s3_client}: Bucket Lock") @allure.title("Bucket Lock (s3_client={s3_client})")
def test_s3_bucket_lock(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize): def test_s3_bucket_lock(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize):
file_path = generate_file(simple_object_size.value) file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path) file_name = s3_helper.object_key_from_file_path(file_path)

View file

@ -23,7 +23,7 @@ class TestS3GateMultipart(ClusterTestBase):
"The upload ID may be invalid, or the upload may have been aborted or completed." "The upload ID may be invalid, or the upload may have been aborted or completed."
) )
@allure.title("{s3_client}: Object Multipart API") @allure.title("Object Multipart API (s3_client={s3_client})")
@pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True) @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
def test_s3_object_multipart(self, s3_client: S3ClientWrapper, bucket: str): def test_s3_object_multipart(self, s3_client: S3ClientWrapper, bucket: str):
parts_count = 5 parts_count = 5
@ -58,7 +58,7 @@ class TestS3GateMultipart(ClusterTestBase):
got_object = s3_client.get_object(bucket, object_key) got_object = s3_client.get_object(bucket, object_key)
assert get_file_hash(got_object) == get_file_hash(file_name_large) assert get_file_hash(got_object) == get_file_hash(file_name_large)
@allure.title("{s3_client}: Abort Multipart Upload") @allure.title("Abort Multipart Upload (s3_client={s3_client})")
@pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True) @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
def test_s3_abort_multipart( def test_s3_abort_multipart(
self, self,
@ -113,7 +113,7 @@ class TestS3GateMultipart(ClusterTestBase):
) )
assert len(objects) == 0, f"Expected no objects in container, got\n{objects}" assert len(objects) == 0, f"Expected no objects in container, got\n{objects}"
@allure.title("{s3_client}: Upload Part Copy") @allure.title("Upload Part Copy (s3_client={s3_client})")
@pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True) @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
def test_s3_multipart_copy(self, s3_client: S3ClientWrapper, bucket: str): def test_s3_multipart_copy(self, s3_client: S3ClientWrapper, bucket: str):
parts_count = 3 parts_count = 3

View file
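This file removes the class-level object_size/objects_in_bucket fixtures and inlines the upload loops into the tests that need them (see test_s3_bulk_deletion_limit further down). If the pattern keeps repeating it could be factored back into a small helper; a hypothetical sketch, with os.path.basename standing in for s3_helper.object_key_from_file_path:

import os

from frostfs_testlib.utils.file_utils import generate_file


def put_objects_in_bucket(s3_client, bucket: str, objects_count: int, object_size: int) -> list[str]:
    # Put objects_count objects of object_size bytes and return their keys.
    object_keys: list[str] = []
    for _ in range(objects_count):
        file_path = generate_file(object_size)
        object_keys.append(os.path.basename(file_path))
        s3_client.put_object(bucket, file_path)
    return object_keys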

@ -31,35 +31,6 @@ def pytest_generate_tests(metafunc: pytest.Metafunc):
@pytest.mark.s3_gate @pytest.mark.s3_gate
@pytest.mark.s3_gate_object @pytest.mark.s3_gate_object
class TestS3GateObject: class TestS3GateObject:
@allure.title("Set object size for current test")
@pytest.fixture
def object_size(self, request: pytest.FixtureRequest) -> int:
object_size = request.param
return object_size
@allure.title("Put objects in a bucket")
@pytest.fixture
def objects_in_bucket(
self,
s3_client: S3ClientWrapper,
bucket: str,
object_size: int,
request: pytest.FixtureRequest,
) -> list[str]:
objects: list[str] = []
objects_count = int(request.param)
with allure.step(
f"Put {objects_count} objects of size '{object_size}' bytes into bucket '{bucket}'"
):
for _ in range(objects_count):
file_path = generate_file(object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
objects.append(file_name)
s3_client.put_object(bucket, file_path)
return objects
@pytest.fixture @pytest.fixture
def second_wallet_public_key(self): def second_wallet_public_key(self):
second_wallet = os.path.join(os.getcwd(), ASSETS_DIR, f"{str(uuid.uuid4())}.json") second_wallet = os.path.join(os.getcwd(), ASSETS_DIR, f"{str(uuid.uuid4())}.json")
@ -67,7 +38,7 @@ class TestS3GateObject:
public_key = wallet_utils.get_wallet_public_key(second_wallet, DEFAULT_WALLET_PASS) public_key = wallet_utils.get_wallet_public_key(second_wallet, DEFAULT_WALLET_PASS)
yield public_key yield public_key
@allure.title("{s3_client}: Copy object") @allure.title("Copy object (s3_client={s3_client})")
def test_s3_copy_object( def test_s3_copy_object(
self, self,
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
@ -123,7 +94,7 @@ class TestS3GateObject:
with pytest.raises(Exception): with pytest.raises(Exception):
s3_client.copy_object(bucket_1, file_name) s3_client.copy_object(bucket_1, file_name)
@allure.title("{s3_client}: Copy version of object") @allure.title("Copy version of object (s3_client={s3_client})")
def test_s3_copy_version_object( def test_s3_copy_version_object(
self, self,
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
@ -170,7 +141,7 @@ class TestS3GateObject:
with pytest.raises(Exception): with pytest.raises(Exception):
s3_client.copy_object(bucket_1, obj_key) s3_client.copy_object(bucket_1, obj_key)
@allure.title("{s3_client}: Checking copy with acl") @allure.title("Copy with acl (s3_client={s3_client})")
def test_s3_copy_acl( def test_s3_copy_acl(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
): ):
@ -191,7 +162,7 @@ class TestS3GateObject:
obj_acl = s3_client.get_object_acl(bucket, copy_obj_path) obj_acl = s3_client.get_object_acl(bucket, copy_obj_path)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser") s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser")
@allure.title("{s3_client}: Copy object with metadata") @allure.title("Copy object with metadata (s3_client={s3_client})")
def test_s3_copy_metadate( def test_s3_copy_metadate(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
): ):
@ -238,7 +209,7 @@ class TestS3GateObject:
obj_head.get("Metadata") == object_metadata_1 obj_head.get("Metadata") == object_metadata_1
), f"Metadata must be {object_metadata_1}" ), f"Metadata must be {object_metadata_1}"
@allure.title("{s3_client}: Copy object with tagging") @allure.title("Copy object with tagging (s3_client={s3_client})")
def test_s3_copy_tagging( def test_s3_copy_tagging(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
): ):
@ -289,7 +260,7 @@ class TestS3GateObject:
for tag in expected_tags: for tag in expected_tags:
assert tag in got_tags, f"Expected tag {tag} in {got_tags}" assert tag in got_tags, f"Expected tag {tag} in {got_tags}"
@allure.title("{s3_client}: Delete version of object") @allure.title("Delete version of object (s3_client={s3_client})")
def test_s3_delete_versioning( def test_s3_delete_versioning(
self, self,
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
@ -352,7 +323,7 @@ class TestS3GateObject:
assert versions.get("DeleteMarkers", None), "Expected delete Marker" assert versions.get("DeleteMarkers", None), "Expected delete Marker"
assert "DeleteMarker" in delete_obj.keys(), "Expected delete Marker" assert "DeleteMarker" in delete_obj.keys(), "Expected delete Marker"
@allure.title("{s3_client}: bulk delete version of object") @allure.title("Bulk delete version of object (s3_client={s3_client})")
def test_s3_bulk_delete_versioning( def test_s3_bulk_delete_versioning(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
): ):
@ -407,7 +378,7 @@ class TestS3GateObject:
obj_versions.sort() == version_to_save.sort() obj_versions.sort() == version_to_save.sort()
), f"Object should have versions: {version_to_save}" ), f"Object should have versions: {version_to_save}"
@allure.title("{s3_client}: Get versions of object") @allure.title("Get versions of object (s3_client={s3_client})")
def test_s3_get_versioning( def test_s3_get_versioning(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
): ):
@ -444,7 +415,7 @@ class TestS3GateObject:
object_3.get("VersionId") == version_id_2 object_3.get("VersionId") == version_id_2
), f"Get object with version {version_id_2}" ), f"Get object with version {version_id_2}"
@allure.title("{s3_client}: Get range") @allure.title("Get range (s3_client={s3_client})")
def test_s3_get_range( def test_s3_get_range(
self, self,
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
@ -546,26 +517,34 @@ class TestS3GateObject:
return result_list return result_list
@allure.title("{s3_client}: Bulk deletion should be limited to 1000 objects") @allure.title("Bulk deletion is limited to 1000 objects (s3_client={s3_client})")
@pytest.mark.parametrize(
"objects_in_bucket, object_size",
[(3, 10)],
indirect=True,
)
def test_s3_bulk_deletion_limit( def test_s3_bulk_deletion_limit(
self, s3_client: S3ClientWrapper, bucket: str, objects_in_bucket: list[str] self,
s3_client: S3ClientWrapper,
bucket: str,
simple_object_size: ObjectSize,
): ):
objects_in_bucket = []
objects_count = 3
with allure.step(f"Put {objects_count} objects into bucket"):
for _ in range(objects_count):
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
objects_in_bucket.append(file_name)
s3_client.put_object(bucket, file_path)
# Extend deletion list to 1001 elements with same keys for test speed # Extend deletion list to 1001 elements with same keys for test speed
objects_to_delete = self.copy_extend_list(objects_in_bucket, 1001) objects_to_delete = self.copy_extend_list(objects_in_bucket, 1001)
with allure.step("Delete 1001 objects and expect error"): with allure.step("Send delete request with 1001 objects and expect error"):
with pytest.raises(Exception, match=S3_MALFORMED_XML_REQUEST): with pytest.raises(Exception, match=S3_MALFORMED_XML_REQUEST):
s3_client.delete_objects(bucket, objects_to_delete) s3_client.delete_objects(bucket, objects_to_delete)
with allure.step("Delete 1000 objects without error"): with allure.step("Send delete request with 1000 objects without error"):
with expect_not_raises(): with expect_not_raises():
s3_client.delete_objects(bucket, objects_to_delete[:1000]) s3_client.delete_objects(bucket, objects_to_delete[:1000])
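copy_extend_list above is defined earlier in this class, outside the shown hunks; it pads the deletion list to 1001 entries by repeating the existing keys. A standalone sketch of that idea:

from itertools import cycle, islice


def copy_extend_list(source: list[str], target_length: int) -> list[str]:
    # Repeat the source keys until the list holds target_length entries.
    return list(islice(cycle(source), target_length))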
@allure.title("{s3_client}: Copy object with metadata") @allure.title("Copy object with metadata (s3_client={s3_client})")
@pytest.mark.smoke @pytest.mark.smoke
def test_s3_head_object( def test_s3_head_object(
self, self,
@ -606,7 +585,9 @@ class TestS3GateObject:
), f"Expected VersionId is {version_id_1}" ), f"Expected VersionId is {version_id_1}"
assert response.get("ContentLength") != 0, "Expected ContentLength is not zero" assert response.get("ContentLength") != 0, "Expected ContentLength is not zero"
@allure.title("{s3_client}: list of objects with version {list_type}") @allure.title(
"List of objects with version (method_version={list_type}, s3_client={s3_client})"
)
@pytest.mark.parametrize("list_type", ["v1", "v2"]) @pytest.mark.parametrize("list_type", ["v1", "v2"])
def test_s3_list_object( def test_s3_list_object(
self, self,
@ -648,7 +629,7 @@ class TestS3GateObject:
), f"bucket should have object key {file_name_2}" ), f"bucket should have object key {file_name_2}"
assert "DeleteMarker" in delete_obj.keys(), "Expected delete Marker" assert "DeleteMarker" in delete_obj.keys(), "Expected delete Marker"
@allure.title("{s3_client}: put object") @allure.title("Put object (s3_client={s3_client})")
def test_s3_put_object( def test_s3_put_object(
self, self,
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
@ -754,7 +735,7 @@ class TestS3GateObject:
{"Key": tag_key_3, "Value": str(tag_value_3)} {"Key": tag_key_3, "Value": str(tag_value_3)}
], "Tags must be the same" ], "Tags must be the same"
@allure.title("{s3_client}: put object with ACL and versioning is {bucket_versioning}") @allure.title("Put object with ACL (versioning={bucket_versioning}, s3_client={s3_client})")
@pytest.mark.parametrize("bucket_versioning", ["ENABLED", "SUSPENDED"]) @pytest.mark.parametrize("bucket_versioning", ["ENABLED", "SUSPENDED"])
def test_s3_put_object_acl( def test_s3_put_object_acl(
self, self,
@ -839,7 +820,7 @@ class TestS3GateObject:
object_6 = s3_client.get_object(bucket, file_name_5) object_6 = s3_client.get_object(bucket, file_name_5)
assert get_file_hash(file_path_5) == get_file_hash(object_6), "Hashes must be the same" assert get_file_hash(file_path_5) == get_file_hash(object_6), "Hashes must be the same"
@allure.title("{s3_client}: put object with lock-mode") @allure.title("Put object with lock-mode (s3_client={s3_client})")
def test_s3_put_object_lock_mode( def test_s3_put_object_lock_mode(
self, self,
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
@ -920,7 +901,7 @@ class TestS3GateObject:
object_lock_retain_until_date=date_obj, object_lock_retain_until_date=date_obj,
) )
@allure.title("{s3_client}: Sync directory with sync type {sync_type}") @allure.title("Sync directory (sync_type={sync_type}, s3_client={s3_client})")
@pytest.mark.parametrize("sync_type", ["sync", "cp"]) @pytest.mark.parametrize("sync_type", ["sync", "cp"])
def test_s3_sync_dir( def test_s3_sync_dir(
self, self,
@ -976,7 +957,7 @@ class TestS3GateObject:
# obj_acl = s3_client.get_object_acl(bucket, obj_key) # obj_acl = s3_client.get_object_acl(bucket, obj_key)
# s3_helper.assert_s3_acl(acl_grants = obj_acl, permitted_users = "AllUsers") # s3_helper.assert_s3_acl(acl_grants = obj_acl, permitted_users = "AllUsers")
@allure.title("{s3_client}: Put 10 nested level object") @allure.title("Put 10 nested level object (s3_client={s3_client})")
def test_s3_put_10_folder( def test_s3_put_10_folder(
self, self,
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
@ -995,7 +976,7 @@ class TestS3GateObject:
s3_client.put_object(bucket, file_path_1) s3_client.put_object(bucket, file_path_1)
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name]) s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
@allure.title("{s3_client}: Delete non-existing object from empty bucket") @allure.title("Delete non-existing object from empty bucket (s3_client={s3_client})")
def test_s3_delete_non_existing_object(self, s3_client: S3ClientWrapper, bucket: str): def test_s3_delete_non_existing_object(self, s3_client: S3ClientWrapper, bucket: str):
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED) s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
@ -1012,7 +993,7 @@ class TestS3GateObject:
objects_list = s3_client.list_objects_versions(bucket) objects_list = s3_client.list_objects_versions(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}" assert not objects_list, f"Expected empty bucket, got {objects_list}"
@allure.title("{s3_client}: Delete the same object twice") @allure.title("Delete the same object twice (s3_client={s3_client})")
def test_s3_delete_twice( def test_s3_delete_twice(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
): ):



@ -25,7 +25,7 @@ def pytest_generate_tests(metafunc: pytest.Metafunc):
@pytest.mark.s3_gate @pytest.mark.s3_gate
class TestS3GatePolicy(ClusterTestBase): class TestS3GatePolicy(ClusterTestBase):
@allure.title("{s3_client}: bucket creation with retention policy applied") @allure.title("Bucket creation with retention policy applied (s3_client={s3_client})")
def test_s3_bucket_location( def test_s3_bucket_location(
self, default_wallet: str, s3_client: S3ClientWrapper, simple_object_size: ObjectSize self, default_wallet: str, s3_client: S3ClientWrapper, simple_object_size: ObjectSize
): ):
@ -91,13 +91,13 @@ class TestS3GatePolicy(ClusterTestBase):
) )
assert copies_2 == 3 assert copies_2 == 3
@allure.title("{s3_client}: bucket with unexisting location constraint") @allure.title("Bucket with unexisting location constraint (s3_client={s3_client})")
def test_s3_bucket_wrong_location(self, s3_client: S3ClientWrapper): def test_s3_bucket_wrong_location(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket with unenxisting location constraint policy"): with allure.step("Create bucket with unenxisting location constraint policy"):
with pytest.raises(Exception): with pytest.raises(Exception):
s3_client.create_bucket(location_constraint="UNEXISTING LOCATION CONSTRAINT") s3_client.create_bucket(location_constraint="UNEXISTING LOCATION CONSTRAINT")
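The two bucket-policy tests above rely on the gateway mapping named location constraints to placement policies, so an unknown name is rejected at bucket creation. A sketch under that assumption, where s3_client stands for the fixture used in the tests and the known constraint name is illustrative:

    import pytest

    def check_location_constraints(s3_client) -> None:
        # A constraint the gateway knows about (assumed name) should succeed.
        s3_client.create_bucket(location_constraint="rep-3")
        # An unknown constraint should be rejected, as in test_s3_bucket_wrong_location.
        with pytest.raises(Exception):
            s3_client.create_bucket(location_constraint="UNEXISTING LOCATION CONSTRAINT")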
@allure.title("{s3_client}: bucket policy") @allure.title("Bucket policy (s3_client={s3_client})")
def test_s3_bucket_policy(self, s3_client: S3ClientWrapper): def test_s3_bucket_policy(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket with default policy"): with allure.step("Create bucket with default policy"):
bucket = s3_client.create_bucket() bucket = s3_client.create_bucket()
@ -127,7 +127,7 @@ class TestS3GatePolicy(ClusterTestBase):
policy_1 = s3_client.get_bucket_policy(bucket) policy_1 = s3_client.get_bucket_policy(bucket)
print(policy_1) print(policy_1)
@allure.title("{s3_client}: bucket CORS") @allure.title("Bucket CORS (s3_client={s3_client})")
def test_s3_cors(self, s3_client: S3ClientWrapper): def test_s3_cors(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket without cors"): with allure.step("Create bucket without cors"):
bucket = s3_client.create_bucket() bucket = s3_client.create_bucket()


@ -28,7 +28,7 @@ class TestS3GateTagging:
tags.append((tag_key, tag_value)) tags.append((tag_key, tag_value))
return tags return tags
@allure.title("{s3_client}: Object tagging") @allure.title("Object tagging (s3_client={s3_client})")
def test_s3_object_tagging( def test_s3_object_tagging(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
): ):
@ -78,7 +78,7 @@ class TestS3GateTagging:
s3_client.delete_object_tagging(bucket, file_name) s3_client.delete_object_tagging(bucket, file_name)
s3_helper.check_tags_by_object(s3_client, bucket, file_name, []) s3_helper.check_tags_by_object(s3_client, bucket, file_name, [])
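A condensed view of the tagging round-trip exercised above. delete_object_tagging and s3_helper.check_tags_by_object appear in the hunk; put_object_tagging and the (key, value) tuple format are assumptions about the wrapper:

    def tagging_round_trip(s3_client, s3_helper, bucket: str, file_name: str) -> None:
        tags = [("tag-key-1", "tag-value-1")]
        s3_client.put_object_tagging(bucket, file_name, tags)              # assumed method name
        s3_helper.check_tags_by_object(s3_client, bucket, file_name, tags)
        s3_client.delete_object_tagging(bucket, file_name)
        s3_helper.check_tags_by_object(s3_client, bucket, file_name, [])   # no tags left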
@allure.title("{s3_client}: bucket tagging") @allure.title("Bucket tagging (s3_client={s3_client})")
def test_s3_bucket_tagging(self, s3_client: S3ClientWrapper, bucket: str): def test_s3_bucket_tagging(self, s3_client: S3ClientWrapper, bucket: str):
with allure.step("Put 10 bucket tags"): with allure.step("Put 10 bucket tags"):


@ -17,13 +17,13 @@ def pytest_generate_tests(metafunc: pytest.Metafunc):
@pytest.mark.s3_gate @pytest.mark.s3_gate
@pytest.mark.s3_gate_versioning @pytest.mark.s3_gate_versioning
class TestS3GateVersioning: class TestS3GateVersioning:
@allure.title("{s3_client}: Impossible to disable versioning with object_lock") @allure.title("Impossible to disable versioning with object_lock (s3_client={s3_client})")
def test_s3_version_off(self, s3_client: S3ClientWrapper): def test_s3_version_off(self, s3_client: S3ClientWrapper):
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True) bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
with pytest.raises(Exception): with pytest.raises(Exception):
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.SUSPENDED) s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.SUSPENDED)
@allure.title("{s3_client}: Enable and disable versioning without object_lock") @allure.title("Enable and disable versioning without object_lock (s3_client={s3_client})")
def test_s3_version(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize): def test_s3_version(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize):
file_path = generate_file(simple_object_size.value) file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path) file_name = s3_helper.object_key_from_file_path(file_path)


@ -16,12 +16,7 @@ from frostfs_testlib.utils.file_utils import generate_file
@pytest.mark.sanity @pytest.mark.sanity
@pytest.mark.session_token @pytest.mark.session_token
class TestDynamicObjectSession(ClusterTestBase): class TestDynamicObjectSession(ClusterTestBase):
@allure.title("Object Operations with Session Token for {object_size}") @allure.title("Object Operations with Session Token (obj_size={object_size})")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object size", "complex object size"],
)
def test_object_session_token(self, default_wallet: str, object_size: ObjectSize): def test_object_session_token(self, default_wallet: str, object_size: ObjectSize):
""" """
Test how operations over objects are executed with a session token Test how operations over objects are executed with a session token


@ -41,7 +41,6 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import generate_file from frostfs_testlib.utils.file_utils import generate_file
from pytest import FixtureRequest
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@ -62,8 +61,6 @@ def storage_containers(
@pytest.fixture( @pytest.fixture(
params=[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object size", "complex object size"],
# Scope module to upload/delete each files set only once # Scope module to upload/delete each files set only once
scope="module", scope="module",
) )
@ -72,10 +69,9 @@ def storage_objects(
client_shell: Shell, client_shell: Shell,
storage_containers: list[str], storage_containers: list[str],
cluster: Cluster, cluster: Cluster,
request: FixtureRequest, object_size: ObjectSize,
) -> list[StorageObjectInfo]: ) -> list[StorageObjectInfo]:
object_size: ObjectSize = request.param
file_path = generate_file(object_size.value) file_path = generate_file(object_size.value)
storage_objects = [] storage_objects = []
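The removed lazy_fixture parametrization suggests object_size is now supplied as a regular parametrized argument, most likely generated centrally (the S3 files above already define pytest_generate_tests). A rough sketch of that direction, with the hook body and size values as assumptions:

    import pytest

    def pytest_generate_tests(metafunc: pytest.Metafunc):
        # Parametrize every test or fixture that asks for object_size once, in one place.
        if "object_size" in metafunc.fixturenames:
            metafunc.parametrize(
                "object_size",
                ["simple", "complex"],  # placeholders for the real ObjectSize values
                ids=["simple object size", "complex object size"],
            )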
@ -152,14 +148,15 @@ def static_sessions(
@pytest.mark.static_session @pytest.mark.static_session
class TestObjectStaticSession(ClusterTestBase): class TestObjectStaticSession(ClusterTestBase):
@allure.title("Read operations with static session: {storage_objects} {verb.value}") @allure.title(
"Read operations with static session (method={method_under_test.__name__}, obj_size={object_size})"
)
@pytest.mark.parametrize( @pytest.mark.parametrize(
"method_under_test,verb", "method_under_test,verb",
[ [
(head_object, ObjectVerb.HEAD), (head_object, ObjectVerb.HEAD),
(get_object, ObjectVerb.GET), (get_object, ObjectVerb.GET),
], ],
ids=["head", "get"],
) )
def test_static_session_read( def test_static_session_read(
self, self,
@ -168,14 +165,10 @@ class TestObjectStaticSession(ClusterTestBase):
static_sessions: dict[ObjectVerb, str], static_sessions: dict[ObjectVerb, str],
method_under_test, method_under_test,
verb: ObjectVerb, verb: ObjectVerb,
request: FixtureRequest,
): ):
""" """
Validate static session with read operations Validate static session with read operations
""" """
allure.dynamic.title(
f"Read operation with static session: {request.node.callspec.id.replace('-', ' ')}"
)
for node in self.cluster.storage_nodes: for node in self.cluster.storage_nodes:
for storage_object in storage_objects[0:2]: for storage_object in storage_objects[0:2]:
@ -188,11 +181,12 @@ class TestObjectStaticSession(ClusterTestBase):
session=static_sessions[verb], session=static_sessions[verb],
) )
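The title changes in this file replace allure.dynamic.title(...) calls inside test bodies with static @allure.title strings: allure-pytest substitutes {placeholders} with the test's argument values, including attribute access such as {method_under_test.__name__}. A self-contained illustration of the mechanism, using stand-in helpers rather than the project's real ones:

    import allure
    import pytest

    def head_object():  # stand-in for the real helper
        pass

    def get_object():   # stand-in for the real helper
        pass

    @allure.title("Read operation (method={method_under_test.__name__}, size={size})")
    @pytest.mark.parametrize("size", ["1kB", "32MB"])
    @pytest.mark.parametrize("method_under_test", [head_object, get_object])
    def test_title_placeholders(method_under_test, size):
        method_under_test()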
@allure.title("Range operations with static session for: {storage_objects} {verb.value}") @allure.title(
"Range operations with static session (method={method_under_test.__name__}, obj_size={object_size})"
)
@pytest.mark.parametrize( @pytest.mark.parametrize(
"method_under_test,verb", "method_under_test,verb",
[(get_range, ObjectVerb.RANGE), (get_range_hash, ObjectVerb.RANGEHASH)], [(get_range, ObjectVerb.RANGE), (get_range_hash, ObjectVerb.RANGEHASH)],
ids=["range", "rangehash"],
) )
def test_static_session_range( def test_static_session_range(
self, self,
@ -201,15 +195,11 @@ class TestObjectStaticSession(ClusterTestBase):
static_sessions: dict[ObjectVerb, str], static_sessions: dict[ObjectVerb, str],
method_under_test, method_under_test,
verb: ObjectVerb, verb: ObjectVerb,
request: FixtureRequest,
max_object_size, max_object_size,
): ):
""" """
Validate static session with range operations Validate static session with range operations
""" """
allure.dynamic.title(
f"Range operation with static session: {request.node.callspec.id.replace('-', ' ')}"
)
storage_object = storage_objects[0] storage_object = storage_objects[0]
ranges_to_test = get_ranges( ranges_to_test = get_ranges(
storage_object, max_object_size, self.shell, self.cluster.default_rpc_endpoint storage_object, max_object_size, self.shell, self.cluster.default_rpc_endpoint
@ -228,18 +218,16 @@ class TestObjectStaticSession(ClusterTestBase):
range_cut=range_to_test, range_cut=range_to_test,
) )
@allure.title("Search operation with static session for {storage_objects}") @allure.title("Search operation with static session (obj_size={object_size})")
def test_static_session_search( def test_static_session_search(
self, self,
user_wallet: WalletInfo, user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo], storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str], static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
): ):
""" """
Validate static session with search operations Validate static session with search operations
""" """
allure.dynamic.title(f"Search operation with static session for {request.node.callspec.id}")
cid = storage_objects[0].cid cid = storage_objects[0].cid
expected_object_ids = [storage_object.oid for storage_object in storage_objects[0:2]] expected_object_ids = [storage_object.oid for storage_object in storage_objects[0:2]]
@ -253,20 +241,18 @@ class TestObjectStaticSession(ClusterTestBase):
) )
assert sorted(expected_object_ids) == sorted(actual_object_ids) assert sorted(expected_object_ids) == sorted(actual_object_ids)
@allure.title("Static session with object id not in session for {storage_objects}") @allure.title(
"[NEGATIVE] Static session with object id not in session (obj_size={object_size})"
)
def test_static_session_unrelated_object( def test_static_session_unrelated_object(
self, self,
user_wallet: WalletInfo, user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo], storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str], static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
): ):
""" """
Validate static session with object id not in session Validate static session with object id not in session
""" """
allure.dynamic.title(
f"Static session with object id not in session for {request.node.callspec.id}"
)
with pytest.raises(Exception, match=UNRELATED_OBJECT): with pytest.raises(Exception, match=UNRELATED_OBJECT):
head_object( head_object(
user_wallet.path, user_wallet.path,
@ -277,20 +263,16 @@ class TestObjectStaticSession(ClusterTestBase):
session=static_sessions[ObjectVerb.HEAD], session=static_sessions[ObjectVerb.HEAD],
) )
@allure.title("Static session with user id not in session for {storage_objects}") @allure.title("[NEGATIVE] Static session with user id not in session (obj_size={object_size})")
def test_static_session_head_unrelated_user( def test_static_session_head_unrelated_user(
self, self,
stranger_wallet: WalletInfo, stranger_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo], storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str], static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
): ):
""" """
Validate static session with user id not in session Validate static session with user id not in session
""" """
allure.dynamic.title(
f"Static session with user id not in session for {request.node.callspec.id}"
)
storage_object = storage_objects[0] storage_object = storage_objects[0]
with pytest.raises(Exception, match=UNRELATED_KEY): with pytest.raises(Exception, match=UNRELATED_KEY):
@ -303,20 +285,16 @@ class TestObjectStaticSession(ClusterTestBase):
session=static_sessions[ObjectVerb.HEAD], session=static_sessions[ObjectVerb.HEAD],
) )
@allure.title("Static session with wrong verb in session for {storage_objects}") @allure.title("[NEGATIVE] Static session with wrong verb in session (obj_size={object_size})")
def test_static_session_head_wrong_verb( def test_static_session_head_wrong_verb(
self, self,
user_wallet: WalletInfo, user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo], storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str], static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
): ):
""" """
Validate static session with wrong verb in session Validate static session with wrong verb in session
""" """
allure.dynamic.title(
f"Static session with wrong verb in session for {request.node.callspec.id}"
)
storage_object = storage_objects[0] storage_object = storage_objects[0]
with pytest.raises(Exception, match=WRONG_VERB): with pytest.raises(Exception, match=WRONG_VERB):
@ -329,21 +307,19 @@ class TestObjectStaticSession(ClusterTestBase):
session=static_sessions[ObjectVerb.HEAD], session=static_sessions[ObjectVerb.HEAD],
) )
@allure.title("Static session with container id not in session for {storage_objects}") @allure.title(
"[NEGATIVE] Static session with container id not in session (obj_size={object_size})"
)
def test_static_session_unrelated_container( def test_static_session_unrelated_container(
self, self,
user_wallet: WalletInfo, user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo], storage_objects: list[StorageObjectInfo],
storage_containers: list[str], storage_containers: list[str],
static_sessions: dict[ObjectVerb, str], static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
): ):
""" """
Validate static session with container id not in session Validate static session with container id not in session
""" """
allure.dynamic.title(
f"Static session with container id not in session for {request.node.callspec.id}"
)
storage_object = storage_objects[0] storage_object = storage_objects[0]
with pytest.raises(Exception, match=UNRELATED_CONTAINER): with pytest.raises(Exception, match=UNRELATED_CONTAINER):
@ -356,7 +332,7 @@ class TestObjectStaticSession(ClusterTestBase):
session=static_sessions[ObjectVerb.GET], session=static_sessions[ObjectVerb.GET],
) )
@allure.title("Static session which signed by another wallet for {storage_objects}") @allure.title("[NEGATIVE] Static session signed by another wallet (obj_size={object_size})")
def test_static_session_signed_by_other( def test_static_session_signed_by_other(
self, self,
owner_wallet: WalletInfo, owner_wallet: WalletInfo,
@ -365,14 +341,10 @@ class TestObjectStaticSession(ClusterTestBase):
storage_containers: list[str], storage_containers: list[str],
storage_objects: list[StorageObjectInfo], storage_objects: list[StorageObjectInfo],
temp_directory: str, temp_directory: str,
request: FixtureRequest,
): ):
""" """
Validate static session which is signed by another wallet Validate static session which is signed by another wallet
""" """
allure.dynamic.title(
f"Static session which signed by another wallet for {request.node.callspec.id}"
)
storage_object = storage_objects[0] storage_object = storage_objects[0]
session_token_file = generate_object_session_token( session_token_file = generate_object_session_token(
@ -394,7 +366,7 @@ class TestObjectStaticSession(ClusterTestBase):
session=signed_token_file, session=signed_token_file,
) )
@allure.title("Static session which signed for another container for {storage_objects}") @allure.title("[NEGATIVE] Static session for another container (obj_size={object_size})")
def test_static_session_signed_for_other_container( def test_static_session_signed_for_other_container(
self, self,
owner_wallet: WalletInfo, owner_wallet: WalletInfo,
@ -402,14 +374,10 @@ class TestObjectStaticSession(ClusterTestBase):
storage_containers: list[str], storage_containers: list[str],
storage_objects: list[StorageObjectInfo], storage_objects: list[StorageObjectInfo],
temp_directory: str, temp_directory: str,
request: FixtureRequest,
): ):
""" """
Validate static session which is signed for another container Validate static session which is signed for another container
""" """
allure.dynamic.title(
f"Static session which signed for another container for {request.node.callspec.id}"
)
storage_object = storage_objects[0] storage_object = storage_objects[0]
container = storage_containers[1] container = storage_containers[1]
@ -432,7 +400,7 @@ class TestObjectStaticSession(ClusterTestBase):
session=signed_token_file, session=signed_token_file,
) )
@allure.title("Static session which wasn't signed for {storage_objects}") @allure.title("[NEGATIVE] Static session without sign (obj_size={object_size})")
def test_static_session_without_sign( def test_static_session_without_sign(
self, self,
owner_wallet: WalletInfo, owner_wallet: WalletInfo,
@ -440,12 +408,10 @@ class TestObjectStaticSession(ClusterTestBase):
storage_containers: list[str], storage_containers: list[str],
storage_objects: list[StorageObjectInfo], storage_objects: list[StorageObjectInfo],
temp_directory: str, temp_directory: str,
request: FixtureRequest,
): ):
""" """
Validate static session which wasn't signed Validate static session which wasn't signed
""" """
allure.dynamic.title(f"Static session which wasn't signed for {request.node.callspec.id}")
storage_object = storage_objects[0] storage_object = storage_objects[0]
session_token_file = generate_object_session_token( session_token_file = generate_object_session_token(
@ -466,7 +432,7 @@ class TestObjectStaticSession(ClusterTestBase):
session=session_token_file, session=session_token_file,
) )
@allure.title("Static session which expires at next epoch for {storage_objects}") @allure.title("Static session which expires at next epoch (obj_size={object_size})")
def test_static_session_expiration_at_next( def test_static_session_expiration_at_next(
self, self,
owner_wallet: WalletInfo, owner_wallet: WalletInfo,
@ -474,14 +440,10 @@ class TestObjectStaticSession(ClusterTestBase):
storage_containers: list[str], storage_containers: list[str],
storage_objects: list[StorageObjectInfo], storage_objects: list[StorageObjectInfo],
temp_directory: str, temp_directory: str,
request: FixtureRequest,
): ):
""" """
Validate static session which expires at next epoch Validate static session which expires at next epoch
""" """
allure.dynamic.title(
f"Static session which expires at next epoch for {request.node.callspec.id}"
)
epoch = ensure_fresh_epoch(self.shell, self.cluster) epoch = ensure_fresh_epoch(self.shell, self.cluster)
container = storage_containers[0] container = storage_containers[0]
@ -537,7 +499,7 @@ class TestObjectStaticSession(ClusterTestBase):
session=token_expire_at_next_epoch, session=token_expire_at_next_epoch,
) )
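The expiry tests around this hunk differ only in how the session token's epoch-bound lifetime is laid out relative to the current epoch obtained from ensure_fresh_epoch. A self-contained sketch of the three variants, assuming a Lifetime structure with exp/nbf/iat fields measured in epochs (the dataclass and field names are assumptions; the token variable names come from the diff):

    from dataclasses import dataclass

    @dataclass
    class Lifetime:  # stand-in for the testlib's lifetime structure (assumed)
        exp: int     # last epoch the token is valid in
        nbf: int     # first epoch the token becomes valid ("not before")
        iat: int     # epoch the token was issued at

    epoch = 100  # in the tests this value comes from ensure_fresh_epoch(...)
    token_expire_at_next_epoch = Lifetime(exp=epoch + 1, nbf=epoch, iat=epoch)
    token_start_at_next_epoch = Lifetime(exp=epoch + 10, nbf=epoch + 1, iat=epoch)
    token_already_expired = Lifetime(exp=epoch - 1, nbf=epoch - 2, iat=epoch - 2)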
@allure.title("Static session which is valid starting from next epoch for {storage_objects}") @allure.title("Static session which is valid since next epoch (obj_size={object_size})")
def test_static_session_start_at_next( def test_static_session_start_at_next(
self, self,
owner_wallet: WalletInfo, owner_wallet: WalletInfo,
@ -545,14 +507,10 @@ class TestObjectStaticSession(ClusterTestBase):
storage_containers: list[str], storage_containers: list[str],
storage_objects: list[StorageObjectInfo], storage_objects: list[StorageObjectInfo],
temp_directory: str, temp_directory: str,
request: FixtureRequest,
): ):
""" """
Validate static session which is valid starting from next epoch Validate static session which is valid starting from next epoch
""" """
allure.dynamic.title(
f"Static session which is valid starting from next epoch for {request.node.callspec.id}"
)
epoch = ensure_fresh_epoch(self.shell, self.cluster) epoch = ensure_fresh_epoch(self.shell, self.cluster)
container = storage_containers[0] container = storage_containers[0]
@ -622,7 +580,7 @@ class TestObjectStaticSession(ClusterTestBase):
session=token_start_at_next_epoch, session=token_start_at_next_epoch,
) )
@allure.title("Static session which is already expired for {storage_objects}") @allure.title("[NEGATIVE] Expired static session (obj_size={object_size})")
def test_static_session_already_expired( def test_static_session_already_expired(
self, self,
owner_wallet: WalletInfo, owner_wallet: WalletInfo,
@ -630,14 +588,10 @@ class TestObjectStaticSession(ClusterTestBase):
storage_containers: list[str], storage_containers: list[str],
storage_objects: list[StorageObjectInfo], storage_objects: list[StorageObjectInfo],
temp_directory: str, temp_directory: str,
request: FixtureRequest,
): ):
""" """
Validate static session which is already expired Validate static session which is already expired
""" """
allure.dynamic.title(
f"Static session which is already expired for {request.node.callspec.id}"
)
epoch = ensure_fresh_epoch(self.shell, self.cluster) epoch = ensure_fresh_epoch(self.shell, self.cluster)
container = storage_containers[0] container = storage_containers[0]
@ -665,20 +619,16 @@ class TestObjectStaticSession(ClusterTestBase):
session=token_already_expired, session=token_already_expired,
) )
@allure.title("Delete verb should be restricted for static session for {storage_objects}") @allure.title("Delete verb is restricted for static session (obj_size={object_size})")
def test_static_session_delete_verb( def test_static_session_delete_verb(
self, self,
user_wallet: WalletInfo, user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo], storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str], static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
): ):
""" """
Delete verb should be restricted for static session Delete verb should be restricted for static session
""" """
allure.dynamic.title(
f"Delete verb should be restricted for static session for {request.node.callspec.id}"
)
storage_object = storage_objects[0] storage_object = storage_objects[0]
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
delete_object( delete_object(
@ -690,20 +640,16 @@ class TestObjectStaticSession(ClusterTestBase):
session=static_sessions[ObjectVerb.DELETE], session=static_sessions[ObjectVerb.DELETE],
) )
@allure.title("Put verb should be restricted for static session for {storage_objects}") @allure.title("Put verb is restricted for static session (obj_size={object_size})")
def test_static_session_put_verb( def test_static_session_put_verb(
self, self,
user_wallet: WalletInfo, user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo], storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str], static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
): ):
""" """
Put verb should be restricted for static session Put verb should be restricted for static session
""" """
allure.dynamic.title(
f"Put verb should be restricted for static session for {request.node.callspec.id}"
)
storage_object = storage_objects[0] storage_object = storage_objects[0]
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
put_object_to_random_node( put_object_to_random_node(
@ -715,7 +661,7 @@ class TestObjectStaticSession(ClusterTestBase):
session=static_sessions[ObjectVerb.PUT], session=static_sessions[ObjectVerb.PUT],
) )
@allure.title("Static session which is issued in future epoch for {storage_objects}") @allure.title("[NEGATIVE] Static session is issued in future epoch (obj_size={object_size})")
def test_static_session_invalid_issued_epoch( def test_static_session_invalid_issued_epoch(
self, self,
owner_wallet: WalletInfo, owner_wallet: WalletInfo,
@ -723,14 +669,10 @@ class TestObjectStaticSession(ClusterTestBase):
storage_containers: list[str], storage_containers: list[str],
storage_objects: list[StorageObjectInfo], storage_objects: list[StorageObjectInfo],
temp_directory: str, temp_directory: str,
request: FixtureRequest,
): ):
""" """
Validate static session which is issued in future epoch Validate static session which is issued in future epoch
""" """
allure.dynamic.title(
f"Static session which is issued in future epoch for {request.node.callspec.id}"
)
epoch = ensure_fresh_epoch(self.shell, self.cluster) epoch = ensure_fresh_epoch(self.shell, self.cluster)
container = storage_containers[0] container = storage_containers[0]