Compare commits

...

30 commits

Author SHA1 Message Date
ffd3e7ade3 [#373] Move common fixture to testlib
Signed-off-by: a.berezin <a.berezin@yadro.com>
2025-03-07 17:05:20 +03:00
eedb915283 [#370] Fix internal interface tests. Add APE rule for container
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2025-02-18 09:23:57 +03:00
f8785fa299 [#368] Extend test_object_api_lifetime test with EC 3.1 policy
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2025-02-14 08:07:02 +00:00
b03f5f46b2 [#369] Added test node-blobstore metrics
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2025-02-09 14:56:17 +03:00
99aa2a547a [#367] Delete Time test
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2025-02-04 10:08:21 +03:00
a375423a4e [#365] Change import CliWrapper class
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2025-01-30 14:42:57 +03:00
3b120643ad [#364] Fixed epoch and object metrics tests
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2025-01-30 12:50:30 +03:00
35f60af47d [#363] Updates for sanity scope
Signed-off-by: a.berezin <a.berezin@yadro.com>
2025-01-29 08:30:08 +00:00
a841251e06 [#362] Rename S3 object patch suite
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2025-01-28 10:27:24 +03:00
b5d0a6691c [#361] Update test_replication to work with more than 4 nodes
Signed-off-by: a.berezin <a.berezin@yadro.com>
2025-01-22 14:03:28 +00:00
2976d3f936 [#360] Added epoch metrics test
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2025-01-21 11:12:37 +03:00
4f989e4260 [#359] Run metrics tests before other tests
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2025-01-17 17:46:12 +03:00
44bb446847 [#358] Do not rely on node id during price configuration
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-12-27 16:32:14 +03:00
67a42cae81 [#357] Use interfaces when appropriate
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-12-27 12:09:22 +03:00
6174330f49 [#355] Attached rule after create container for EC replication test with filter
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2024-12-26 10:21:48 +00:00
ee1f898849 [#354] Fixed grpc test metrics
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2024-12-26 08:03:54 +00:00
b4d27260ef [#353] Extend testsuites for PATCH method
Expandable suites:
- TestApeContainer
- TestApeBearer
- TestApeLocalOverrideAllow
- TestApeLocalOverrideDeny
- TestObjectApiWithoutUser
- TestObjectApiWithBearerToken

Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-12-25 15:40:37 +00:00
d466e1a721 [#352] Update expected error
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-12-25 11:49:17 +03:00
48b97404ce [#339] Refine CODEOWNERS settings
Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2024-12-25 08:47:27 +00:00
1e6584f421 [#351] Fixed run sequence metrics tests
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2024-12-24 18:14:17 +03:00
d11abec967 [#350] Fix imports
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-12-24 05:11:02 +00:00
0ac5edfdfb [#348] Add test for multipart object in Test_http_object testsuite
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-12-24 05:07:10 +00:00
f10d899173 [#349] Update session_token tests
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-12-23 18:01:07 +03:00
9ca70c80e3 [#346] Move s3 and http directories to avoid conflict with requests
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-12-18 12:56:30 +03:00
177ef36693 [#343] Fixed test logs metrics
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2024-12-17 14:21:40 +00:00
c2f322daeb [#345] Add new args for ContainerRequest
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2024-12-17 14:17:45 +00:00
4eef2f2437 [#344] Make user fixture parametrized to allow dedicated user creation
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-12-17 11:03:19 +03:00
c75352e267 [#338] Automation of PATCH method in S3
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-12-17 07:39:46 +00:00
b36c7e90aa [#341] Fixed container metrics for tombstone
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2024-12-13 17:00:08 +03:00
29aca20956 [#337] Update error match message for data network test
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2024-12-05 16:01:19 +03:00
54 changed files with 1907 additions and 713 deletions

View file

@@ -1 +1,3 @@
-* @JuliaKovshova @abereziny @d.zayakin @anikeev-yadro @anurindm @ylukoyan @i.niyazov
+.* @TrueCloudLab/qa-committers
+.forgejo/.* @potyarkin
+Makefile @potyarkin

View file

@@ -10,6 +10,7 @@ markers =
     staging: test to be excluded from run in verifier/pr-validation/sanity jobs and run test in staging job
     sanity: test runs in sanity testrun
     smoke: test runs in smoke testrun
+    exclude_sanity: tests which should not be in sanity scope
     # controlling markers
     order: manual control of test order
     logs_after_session: Make the last test in session
@@ -66,7 +67,6 @@ markers =
     failover_data_loss: failover tests in case of data loss
     metabase_loss: tests for metadata loss
     write_cache_loss: tests for write cache loss
-    time: time tests
     replication: replication tests
     ec_replication: replication EC
     static_session_container: tests for a static session in a container

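The new exclude_sanity marker lets a test stay in its feature suite while being carved out of the sanity scope. The job definitions themselves are not part of this diff, so the entry point below is only a hedged sketch of how a sanity job could combine the two markers:

import pytest

# Hypothetical sanity-job entry point: run tests marked `sanity`
# unless they also carry the new `exclude_sanity` marker.
if __name__ == "__main__":
    raise SystemExit(pytest.main(["-m", "sanity and not exclude_sanity"]))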
View file

@@ -20,7 +20,10 @@ ALL_OBJECT_OPERATIONS = ape.ObjectOperations.get_all()
 FULL_ACCESS = {op: True for op in ALL_OBJECT_OPERATIONS}
 NO_ACCESS = {op: False for op in ALL_OBJECT_OPERATIONS}
-RO_ACCESS = {op: True if op not in [ape.ObjectOperations.PUT, ape.ObjectOperations.DELETE] else False for op in ALL_OBJECT_OPERATIONS}
+RO_ACCESS = {
+    op: True if op not in [ape.ObjectOperations.PUT, ape.ObjectOperations.DELETE, ape.ObjectOperations.PATCH] else False
+    for op in ALL_OBJECT_OPERATIONS
+}


 def assert_access_to_container(

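With the change above, RO_ACCESS now also denies PATCH, keeping it consistent with the other write-style operations. A self-contained sketch of the comprehension, using a stand-in enum since ape.ObjectOperations lives in frostfs-testlib (member names assumed from the surrounding diff):

from enum import Enum

# Stand-in for frostfs_testlib's ape.ObjectOperations (member names assumed).
class ObjectOperations(str, Enum):
    PUT = "object.put"
    GET = "object.get"
    HEAD = "object.head"
    GET_RANGE = "object.range"
    GET_RANGE_HASH = "object.hash"
    SEARCH = "object.search"
    PATCH = "object.patch"
    DELETE = "object.delete"

ALL_OBJECT_OPERATIONS = list(ObjectOperations)

# Same shape as RO_ACCESS in the diff: write-style operations map to False.
RO_ACCESS = {
    op: op not in [ObjectOperations.PUT, ObjectOperations.DELETE, ObjectOperations.PATCH]
    for op in ALL_OBJECT_OPERATIONS
}

assert RO_ACCESS[ObjectOperations.GET] is True
assert RO_ACCESS[ObjectOperations.PATCH] is False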
View file

@@ -63,7 +63,15 @@ def _create_container_by_spec(
     cluster: Cluster,
     endpoint: str,
 ) -> str:
-    return create_container(wallet, shell, endpoint, container_request.parsed_rule(cluster), wait_for_creation=False)
+    return create_container(
+        wallet,
+        shell,
+        endpoint,
+        container_request.parsed_rule(cluster),
+        wait_for_creation=False,
+        nns_zone=container_request.ns_zone,
+        nns_name=container_request.ns_name,
+    )


 def _apply_ape_rules(cid: str, frostfs_cli: FrostfsCli, endpoint: str, ape_rules: list[ape.Rule]):

View file

@@ -21,6 +21,9 @@ class ContainerRequest:
     short_name: str | None = None

+    ns_name: str | None = None
+    ns_zone: str | None = None
+
     def __post_init__(self):
         if self.ape_rules is None:
            self.ape_rules = []
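The two new optional fields let a container request carry an NNS name and zone, which _create_container_by_spec forwards to create_container as nns_name/nns_zone. A minimal runnable sketch with a stand-in dataclass (the real class lives in the helpers and has more fields; the placement-policy field name here is a hypothetical placeholder):

from dataclasses import dataclass

@dataclass
class ContainerRequest:
    policy: str  # hypothetical placeholder for the placement-policy field
    short_name: str | None = None
    ns_name: str | None = None
    ns_zone: str | None = None

request = ContainerRequest(
    "REP 2 IN X CBF 1 SELECT 2 FROM * AS X",
    ns_name="my-container",
    ns_zone="container",
)
print(request.ns_name, request.ns_zone)  # forwarded as nns_name / nns_zone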

View file

@@ -15,8 +15,7 @@ def validate_object_policy(wallet: str, shell: Shell, placement_rule: str, cid:
 def get_netmap_param(netmap_info: list[NodeNetmapInfo]) -> dict:
     dict_external = dict()
     for node in netmap_info:
-        external_adress = node.external_address[0].split("/")[2]
-        dict_external[external_adress] = {
+        dict_external[node.node] = {
             "country": node.country,
             "country_code": node.country_code,
             "Price": node.price,

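The old code keyed the netmap dictionary on a substring parsed out of the first external address, which silently depended on every node advertising at least one well-formed multiaddr; keying on the node identifier avoids the parsing entirely. A small sketch of the fragility, with invented address strings:

# Multiaddr-style strings as they might appear in netmap output (invented here).
addresses = ["/dns4/node1.example.com/tcp/8080"]
host = addresses[0].split("/")[2]  # "node1.example.com" -- works for this shape

addresses = []                     # node without an external address
# addresses[0].split("/")[2] would raise IndexError here, so the old keying
# scheme broke for nodes with no (or differently shaped) external address.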
View file

@@ -2,10 +2,12 @@ import allure
 import pytest
 from frostfs_testlib import reporter
 from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
+from frostfs_testlib.resources.error_patterns import OBJECT_ACCESS_DENIED
 from frostfs_testlib.steps.cli.object import put_object_to_random_node
 from frostfs_testlib.steps.node_management import drop_object
 from frostfs_testlib.storage.dataclasses import ape
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils import wallet_utils
 from frostfs_testlib.utils.failover_utils import wait_object_replication
@@ -33,9 +35,12 @@ def allowed_wallet(default_wallet: WalletInfo, other_wallet: WalletInfo, role: a
 @pytest.mark.nightly
 @pytest.mark.ape
 class TestApeContainer(ClusterTestBase):
+    # TODO: Without PATCH operation,
+    # since it requires specific permissions that do not apply when testing all operations at once
     @pytest.mark.sanity
     @allure.title("Deny operations via APE by role (role={role}, obj_size={object_size})")
     @pytest.mark.parametrize("role", [ape.Role.OWNER, ape.Role.OTHERS], indirect=True)
+    @pytest.mark.parametrize("objects", [4], indirect=True)
     def test_deny_operations_via_ape_by_role(
         self,
         denied_wallet: WalletInfo,
@@ -44,7 +49,7 @@ class TestApeContainer(ClusterTestBase):
         container: str,
         objects: list[str],
         role: ape.Role,
-        file_path: TestFile,
+        test_file: TestFile,
         rpc_endpoint: str,
     ):
         with reporter.step(f"Deny all operations for {role} via APE"):
@@ -58,10 +63,10 @@ class TestApeContainer(ClusterTestBase):
         with reporter.step(f"Assert denied role have no access to public container"):
             # access checks will try to remove object, so we use .pop() to ensure we have object before deletion
-            assert_no_access_to_container(denied_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
+            assert_no_access_to_container(denied_wallet, container, objects.pop(), test_file, self.shell, self.cluster)

         with reporter.step(f"Assert allowed role have full access to public container"):
-            assert_full_access_to_container(allowed_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
+            assert_full_access_to_container(allowed_wallet, container, objects.pop(), test_file, self.shell, self.cluster)

         with reporter.step(f"Remove deny rule from APE"):
             frostfs_cli.ape_manager.remove(rpc_endpoint, deny_rule.chain_id, target_name=container, target_type="container")
@@ -70,12 +75,15 @@ class TestApeContainer(ClusterTestBase):
             self.wait_for_blocks()

         with reporter.step("Assert allowed role have full access to public container"):
-            assert_full_access_to_container(allowed_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
+            assert_full_access_to_container(allowed_wallet, container, objects.pop(), test_file, self.shell, self.cluster)

         with reporter.step("Assert denied role have full access to public container"):
-            assert_full_access_to_container(denied_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
+            assert_full_access_to_container(denied_wallet, container, objects.pop(), test_file, self.shell, self.cluster)

+    # TODO: Without PATCH operation,
+    # since it requires specific permissions that do not apply when testing all operations at once
     @allure.title("Deny operations for others via APE excluding single pubkey (obj_size={object_size})")
+    @pytest.mark.parametrize("objects", [2], indirect=True)
     def test_deny_opeartions_excluding_pubkey(
         self,
         frostfs_cli: FrostfsCli,
@@ -85,7 +93,7 @@ class TestApeContainer(ClusterTestBase):
         container: str,
         objects: list[str],
         rpc_endpoint: str,
-        file_path: TestFile,
+        test_file: TestFile,
     ):
         with reporter.step("Add deny APE rules for others except single wallet"):
             rule_conditions = [
@@ -103,13 +111,13 @@ class TestApeContainer(ClusterTestBase):
         with reporter.step("Assert others have no access to public container"):
             # access checks will try to remove object, so we use .pop() to ensure we have object before deletion
-            assert_no_access_to_container(other_wallet, container, objects[0], file_path, self.shell, self.cluster)
+            assert_no_access_to_container(other_wallet, container, objects[0], test_file, self.shell, self.cluster)

         with reporter.step("Assert owner have full access to public container"):
-            assert_full_access_to_container(default_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
+            assert_full_access_to_container(default_wallet, container, objects.pop(), test_file, self.shell, self.cluster)

         with reporter.step("Assert allowed wallet have full access to public container"):
-            assert_full_access_to_container(other_wallet_2, container, objects.pop(), file_path, self.shell, self.cluster)
+            assert_full_access_to_container(other_wallet_2, container, objects.pop(), test_file, self.shell, self.cluster)

     @allure.title("Replication works with APE deny rules on OWNER and OTHERS (obj_size={object_size})")
     @pytest.mark.parametrize(
@@ -123,10 +131,10 @@ class TestApeContainer(ClusterTestBase):
         frostfs_cli: FrostfsCli,
         container: str,
         rpc_endpoint: str,
-        file_path: TestFile,
+        test_file: TestFile,
     ):
         with reporter.step("Put object to container"):
-            oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
+            oid = put_object_to_random_node(default_wallet, test_file, container, self.shell, self.cluster)

         with reporter.step("Wait for object replication after upload"):
             wait_object_replication(container, oid, len(self.cluster.cluster_nodes), self.shell, self.cluster.storage_nodes)
@@ -151,10 +159,13 @@ class TestApeContainer(ClusterTestBase):
         with reporter.step("Wait for dropped object to be replicated"):
             wait_object_replication(container, oid, len(self.cluster.storage_nodes), self.shell, self.cluster.storage_nodes)

+    # TODO: Without PATCH operation,
+    # since it requires specific permissions that do not apply when testing all operations at once
     @allure.title("Deny operations via APE by role (role=ir, obj_size={object_size})")
     @pytest.mark.parametrize("container_request", [OWNER_ALLOW_ALL], indirect=True)
+    @pytest.mark.parametrize("objects", [3], indirect=True)
     def test_deny_operations_via_ape_by_role_ir(
-        self, frostfs_cli: FrostfsCli, ir_wallet: WalletInfo, container: str, objects: list[str], rpc_endpoint: str, file_path: TestFile
+        self, frostfs_cli: FrostfsCli, ir_wallet: WalletInfo, container: str, objects: list[str], rpc_endpoint: str, test_file: TestFile
     ):
         default_ir_access = {
             ape.ObjectOperations.PUT: False,
@@ -163,11 +174,12 @@ class TestApeContainer(ClusterTestBase):
             ape.ObjectOperations.GET_RANGE: True,
             ape.ObjectOperations.GET_RANGE_HASH: True,
             ape.ObjectOperations.SEARCH: True,
+            ape.ObjectOperations.PATCH: False,
             ape.ObjectOperations.DELETE: False,
         }

         with reporter.step("Assert IR wallet access in default state"):
-            assert_access_to_container(default_ir_access, ir_wallet, container, objects[0], file_path, self.shell, self.cluster)
+            assert_access_to_container(default_ir_access, ir_wallet, container, objects.pop(), test_file, self.shell, self.cluster)

         with reporter.step("Add deny APE rule with deny all operations for IR role"):
             rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, [ape.Condition.by_role(ape.Role.IR.value)])
@@ -177,7 +189,7 @@ class TestApeContainer(ClusterTestBase):
             self.wait_for_blocks()

         with reporter.step("Assert IR wallet ignores APE rules"):
-            assert_access_to_container(default_ir_access, ir_wallet, container, objects[0], file_path, self.shell, self.cluster)
+            assert_access_to_container(default_ir_access, ir_wallet, container, objects.pop(), test_file, self.shell, self.cluster)

         with reporter.step("Remove APE rule"):
             frostfs_cli.ape_manager.remove(rpc_endpoint, rule.chain_id, target_name=container, target_type="container")
@@ -186,10 +198,13 @@ class TestApeContainer(ClusterTestBase):
             self.wait_for_blocks()

         with reporter.step("Assert IR wallet access is restored"):
-            assert_access_to_container(default_ir_access, ir_wallet, container, objects[0], file_path, self.shell, self.cluster)
+            assert_access_to_container(default_ir_access, ir_wallet, container, objects.pop(), test_file, self.shell, self.cluster)

+    # TODO: Without PATCH operation,
+    # since it requires specific permissions that do not apply when testing all operations at once
     @allure.title("Deny operations via APE by role (role=container, obj_size={object_size})")
     @pytest.mark.parametrize("container_request", [OWNER_ALLOW_ALL], indirect=True)
+    @pytest.mark.parametrize("objects", [3], indirect=True)
     def test_deny_operations_via_ape_by_role_container(
         self,
         frostfs_cli: FrostfsCli,
@@ -197,7 +212,7 @@ class TestApeContainer(ClusterTestBase):
         container: str,
         objects: list[str],
         rpc_endpoint: str,
-        file_path: TestFile,
+        test_file: TestFile,
     ):
         access_matrix = {
             ape.ObjectOperations.PUT: True,
@@ -206,11 +221,12 @@ class TestApeContainer(ClusterTestBase):
             ape.ObjectOperations.GET_RANGE: True,
             ape.ObjectOperations.GET_RANGE_HASH: True,
             ape.ObjectOperations.SEARCH: True,
+            ape.ObjectOperations.PATCH: True,
             ape.ObjectOperations.DELETE: True,
         }

         with reporter.step("Assert CONTAINER wallet access in default state"):
-            assert_access_to_container(access_matrix, container_node_wallet, container, objects[0], file_path, self.shell, self.cluster)
+            assert_access_to_container(access_matrix, container_node_wallet, container, objects.pop(), test_file, self.shell, self.cluster)

         rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, ape.Condition.by_role(ape.Role.CONTAINER.value))
@@ -221,7 +237,7 @@ class TestApeContainer(ClusterTestBase):
             self.wait_for_blocks()

         with reporter.step("Assert CONTAINER wallet ignores APE rule"):
-            assert_access_to_container(access_matrix, container_node_wallet, container, objects[1], file_path, self.shell, self.cluster)
+            assert_access_to_container(access_matrix, container_node_wallet, container, objects.pop(), test_file, self.shell, self.cluster)

         with reporter.step("Remove APE rule"):
             frostfs_cli.ape_manager.remove(rpc_endpoint, rule.chain_id, target_name=container, target_type="container")
@@ -230,4 +246,152 @@ class TestApeContainer(ClusterTestBase):
             self.wait_for_blocks()

         with reporter.step("Assert CONTAINER wallet access after rule was removed"):
-            assert_access_to_container(access_matrix, container_node_wallet, container, objects[2], file_path, self.shell, self.cluster)
+            assert_access_to_container(access_matrix, container_node_wallet, container, objects.pop(), test_file, self.shell, self.cluster)
+
+    @allure.title("Deny PATCH operation via APE (object_size={object_size})")
+    @pytest.mark.parametrize("objects", [1], indirect=True)
+    def test_patch_object_with_deny_rule(
+        self,
+        frostfs_cli: FrostfsCli,
+        grpc_client: GrpcClientWrapper,
+        grpc_client_with_other_wallet: GrpcClientWrapper,
+        grpc_client_with_container_wallet: GrpcClientWrapper,
+        grpc_client_with_ir_wallet: GrpcClientWrapper,
+        container: str,
+        objects: list[str],
+        test_file: TestFile,
+    ):
+        patch_params = {
+            "cid": container,
+            "oid": objects[0],
+            "endpoint": self.cluster.default_rpc_endpoint,
+            "ranges": ["300:200"],
+            "payloads": [test_file],
+            "new_attrs": "owner=true",
+            "timeout": "200s",
+        }
+
+        with reporter.step("Check that PATCH is available with owner wallet"):
+            patched_oid = grpc_client.object.patch(**patch_params)
+            assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
+            patch_params["oid"] = patched_oid
+
+        with reporter.step("Check that PATCH is available with another wallet"):
+            patch_params["ranges"] = ["100:50"]
+            patch_params["new_attrs"] = "other=true"
+            patched_oid = grpc_client_with_other_wallet.object.patch(**patch_params)
+            assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
+            patch_params["oid"] = patched_oid
+
+        with reporter.step("Check that PATCH is available with container wallet"):
+            patch_params["ranges"] = ["600:0"]
+            patch_params["new_attrs"] = "container=true"
+            patched_oid = grpc_client_with_container_wallet.object.patch(**patch_params)
+            assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
+            patch_params["oid"] = patched_oid
+
+        with reporter.step("Check that PATCH is available with ir wallet"):
+            patch_params["ranges"] = ["0:1000"]
+            patch_params["new_attrs"] = "ir=true"
+            patched_oid = grpc_client_with_ir_wallet.object.patch(**patch_params)
+            assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
+            patch_params["oid"] = patched_oid
+
+        rule = ape.Rule(ape.Verb.DENY, ape.ObjectOperations.PATCH)
+
+        with reporter.step("Add APE rule with deny PATCH operation"):
+            frostfs_cli.ape_manager.add(
+                self.cluster.default_rpc_endpoint,
+                rule.chain_id,
+                target_name=container,
+                target_type="container",
+                rule=rule.as_string(),
+            )
+
+        with reporter.step("Wait for one block"):
+            self.wait_for_blocks(1)
+
+        with reporter.step("Check that PATCH is not allowed with owner wallet"):
+            patch_params["ranges"] = ["300:200"]
+            patch_params["new_attrs"] = "owner_2=false"
+            with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
+                grpc_client.object.patch(**patch_params)
+
+        with reporter.step("Check that PATCH is not allowed with another wallet"):
+            patch_params["ranges"] = ["100:50"]
+            patch_params["new_attrs"] = "other_2=false"
+            with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
+                grpc_client_with_other_wallet.object.patch(**patch_params)
+
+        with reporter.step("Check that PATCH is allowed with container wallet as rule is ignored"):
+            patch_params["ranges"] = ["600:0"]
+            patch_params["new_attrs"] = "container_2=true"
+            patched_oid = grpc_client_with_container_wallet.object.patch(**patch_params)
+            assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
+            patch_params["oid"] = patched_oid
+
+        with reporter.step("Check that PATCH is not allowed with ir wallet"):
+            patch_params["ranges"] = ["0:1000"]
+            patch_params["new_attrs"] = "ir_2=true"
+            with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
+                grpc_client_with_ir_wallet.object.patch(**patch_params)
+
+        with reporter.step("Remove APE rule"):
+            frostfs_cli.ape_manager.remove(
+                self.cluster.default_rpc_endpoint,
+                rule.chain_id,
+                target_name=container,
+                target_type="container",
+            )
+
+        with reporter.step("Wait for one block"):
+            self.wait_for_blocks(1)
+
+        with reporter.step("Check that PATCH is available with owner wallet"):
+            patch_params["ranges"] = ["300:200"]
+            patch_params["new_attrs"] = "owner_3=true"
+            patched_oid = grpc_client.object.patch(**patch_params)
+            assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
+            patch_params["oid"] = patched_oid
+
+        with reporter.step("Check that PATCH is available with another wallet"):
+            patch_params["ranges"] = ["100:50"]
+            patch_params["new_attrs"] = "other_3=true"
+            patched_oid = grpc_client_with_other_wallet.object.patch(**patch_params)
+            assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
+            patch_params["oid"] = patched_oid
+
+        with reporter.step("Check that PATCH is available with container wallet"):
+            patch_params["ranges"] = ["600:0"]
+            patch_params["new_attrs"] = "container_3=true"
+            patched_oid = grpc_client_with_container_wallet.object.patch(**patch_params)
+            assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
+            patch_params["oid"] = patched_oid
+
+        with reporter.step("Check that PATCH is available with ir wallet"):
+            patch_params["ranges"] = ["0:1000"]
+            patch_params["new_attrs"] = "ir_3=true"
+            patched_oid = grpc_client_with_ir_wallet.object.patch(**patch_params)
+            assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
+            patch_params["oid"] = patched_oid
+
+        attrs = {"owner", "other", "container", "ir", "container_2", "owner_3", "other_3", "container_3", "ir_3"}
+
+        with reporter.step("Ensure that all attributes match expected values"):
+            object_info: dict = grpc_client.object.head(container, patch_params["oid"], self.cluster.default_rpc_endpoint)
+            object_attrs: dict = object_info["header"]["attributes"]
+            assert attrs <= {k for k in object_attrs.keys()}, f"Received attributes do not match expected ones: {object_attrs}"
+            assert all(
+                v == "true" for k, v in object_attrs.items() if k in attrs
+            ), f"Received attributes do not match expected ones: {object_attrs}"

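The patch calls above drive the PATCH operation through the testlib gRPC client wrapper; each entry in ranges pairs with an entry in payloads. Assuming the usual "offset:length" reading (replace length bytes starting at offset with the payload; this semantics is inferred from the test, not stated in the diff), "600:0" is a pure insertion and "0:1000" replaces a leading slice. A byte-level model:

def apply_patch(data: bytes, rng: str, payload: bytes) -> bytes:
    """Model of an `offset:length` patch: replace that slice with the payload."""
    offset, length = (int(part) for part in rng.split(":"))
    return data[:offset] + payload + data[offset + length :]

blob = bytes(1000)
patched = apply_patch(blob, "300:200", b"X" * 50)   # shrink: 200 bytes -> 50
assert len(patched) == 1000 - 200 + 50
patched = apply_patch(patched, "600:0", b"Y" * 10)  # "600:0" = pure insertion
assert len(patched) == 850 + 10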
View file

@@ -2,8 +2,10 @@ import allure
 import pytest
 from frostfs_testlib import reporter
 from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
+from frostfs_testlib.resources.error_patterns import OBJECT_ACCESS_DENIED
 from frostfs_testlib.storage.dataclasses import ape
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils.file_utils import TestFile
@@ -20,9 +22,13 @@ from ....helpers.container_access import (
 @pytest.mark.sanity
 @pytest.mark.bearer
 @pytest.mark.ape
+@pytest.mark.parametrize("user_tag", ["ApeBearer"], indirect=True)  # provide dedicated user with no APE side-policies
 class TestApeBearer(ClusterTestBase):
+    # TODO: Without PATCH operation,
+    # since it requires specific permissions that do not apply when testing all operations at once
     @allure.title("Operations with BearerToken (role={role}, obj_size={object_size})")
     @pytest.mark.parametrize("role", [ape.Role.OWNER, ape.Role.OTHERS], indirect=True)
+    @pytest.mark.parametrize("objects", [4], indirect=True)
     def test_bearer_token_operations(
         self,
         container: str,
@@ -31,11 +37,11 @@ class TestApeBearer(ClusterTestBase):
         temp_directory: str,
         test_wallet: WalletInfo,
         role: ape.Role,
-        file_path: TestFile,
+        test_file: TestFile,
         rpc_endpoint: str,
     ):
         with reporter.step(f"Check {role} has full access to container without bearer token"):
-            assert_full_access_to_container(test_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
+            assert_full_access_to_container(test_wallet, container, objects.pop(), test_file, self.shell, self.cluster)

         with reporter.step(f"Deny all operations for everyone via APE"):
             rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS)
@@ -54,10 +60,10 @@
         )

         with reporter.step(f"Check {role} without token has no access to all operations with container"):
-            assert_no_access_to_container(test_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
+            assert_no_access_to_container(test_wallet, container, objects.pop(), test_file, self.shell, self.cluster)

         with reporter.step(f"Check {role} with token has access to all operations with container"):
-            assert_full_access_to_container(test_wallet, container, objects.pop(), file_path, self.shell, self.cluster, bearer)
+            assert_full_access_to_container(test_wallet, container, objects.pop(), test_file, self.shell, self.cluster, bearer)

         with reporter.step(f"Remove deny rule from APE"):
             frostfs_cli.ape_manager.remove(rpc_endpoint, rule.chain_id, target_name=container, target_type="container")
@@ -66,9 +72,121 @@ class TestApeBearer(ClusterTestBase):
             self.wait_for_blocks()

         with reporter.step(f"Check {role} without token has access to all operations with container"):
-            assert_full_access_to_container(test_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
+            assert_full_access_to_container(test_wallet, container, objects.pop(), test_file, self.shell, self.cluster)
+
+    @allure.title("Patch operation with BearerToken (object_size={object_size})")
+    @pytest.mark.parametrize("objects", [1], indirect=True)
+    def test_patch_object_with_bearer_token(
+        self,
+        frostfs_cli: FrostfsCli,
+        grpc_client_with_other_wallet: GrpcClientWrapper,
+        container: str,
+        objects: list[str],
+        test_file: TestFile,
+        temp_directory: str,
+    ):
+        oid = objects[0]
+
+        with reporter.step("Check if the patch is available with another wallet"):
+            patched_oid = grpc_client_with_other_wallet.object.patch(
+                container,
+                oid,
+                self.cluster.default_rpc_endpoint,
+                ranges=["100:300"],
+                payloads=[test_file],
+                new_attrs="allow-patch=true",
+                timeout="200s",
+            )
+            assert patched_oid != oid, "OID of patched object must be different from original one"
+            oid = patched_oid
+
+        rule = ape.Rule(ape.Verb.DENY, ape.ObjectOperations.PATCH)
+
+        with reporter.step("Deny PATCH operation for everyone via APE"):
+            frostfs_cli.ape_manager.add(
+                self.cluster.default_rpc_endpoint,
+                rule.chain_id,
+                target_name=container,
+                target_type="container",
+                rule=rule.as_string(),
+            )
+
+        with reporter.step("Wait for one block"):
+            self.wait_for_blocks(1)
+
+        with reporter.step("Check that patch is not allowed with another wallet"):
+            with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
+                grpc_client_with_other_wallet.object.patch(
+                    container,
+                    oid,
+                    self.cluster.default_rpc_endpoint,
+                    ranges=["100:300"],
+                    payloads=[test_file],
+                    new_attrs="deny-patch=true",
+                    timeout="200s",
+                )
+
+        with reporter.step("Create bearer token with all operations allowed"):
+            bearer = create_bearer_token(
+                frostfs_cli,
+                temp_directory,
+                container,
+                rule=ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS),
+                endpoint=self.cluster.default_rpc_endpoint,
+            )
+
+        with reporter.step("Check that patch is available with another wallet with BearerToken"):
+            patched_oid = grpc_client_with_other_wallet.object.patch(
+                container,
+                oid,
+                self.cluster.default_rpc_endpoint,
+                bearer=bearer,
+                ranges=["100:300"],
+                payloads=[test_file],
+                new_attrs="bearer-patch=true",
+                timeout="200s",
+            )
+            assert patched_oid != oid, "OID of patched object must be different from original one"
+            oid = patched_oid
+
+        with reporter.step(f"Remove deny rule from APE"):
+            frostfs_cli.ape_manager.remove(
+                self.cluster.default_rpc_endpoint,
+                rule.chain_id,
+                target_name=container,
+                target_type="container",
+            )
+
+        with reporter.step("Wait for one block"):
+            self.wait_for_blocks(1)
+
+        with reporter.step("Check if the patch is available with another wallet"):
+            patched_oid = grpc_client_with_other_wallet.object.patch(
+                container,
+                oid,
+                self.cluster.default_rpc_endpoint,
+                bearer=bearer,
+                ranges=["100:300"],
+                payloads=[test_file],
+                new_attrs="allow-patch-2=true",
+                timeout="200s",
+            )
+            assert patched_oid != oid, "OID of patched object must be different from original one"
+            oid = patched_oid
+
+        attrs = {"allow-patch", "bearer-patch", "allow-patch-2"}
+
+        with reporter.step("Ensure that all attributes match expected values"):
+            object_info: dict = grpc_client_with_other_wallet.object.head(container, oid, self.cluster.default_rpc_endpoint)
+            object_attrs: dict = object_info["header"]["attributes"]
+            assert attrs <= {k for k in object_attrs.keys()}, f"Received attributes do not match expected ones: {object_attrs}"
+            assert all(
+                v == "true" for k, v in object_attrs.items() if k in attrs
+            ), f"Received attributes do not match expected ones: {object_attrs}"

     @allure.title("BearerToken for compound operations (obj_size={object_size})")
+    @pytest.mark.parametrize("objects", [4], indirect=True)
     def test_bearer_token_compound_operations(
         self,
         frostfs_cli: FrostfsCli,
@@ -78,7 +196,7 @@ class TestApeBearer(ClusterTestBase):
         container: str,
         objects: list[str],
         rpc_endpoint: str,
-        file_path: TestFile,
+        test_file: TestFile,
     ):
         """
         Bearer Token COMPLETELY overrides chains set for the specific target.
@@ -150,7 +268,7 @@ class TestApeBearer(ClusterTestBase):
                 ape.ObjectOperations.PUT,
                 ape.ObjectOperations.HEAD,
                 ape.ObjectOperations.GET_RANGE,
-                # Delete also requires PUT (to make tobstone) and HEAD (to get simple objects header)
+                # Delete also requires PUT (to make tombstone) and HEAD (to get simple objects header)
                 ape.ObjectOperations.DELETE,
             ],
             ape.Role.OTHERS: [
@@ -179,7 +297,7 @@ class TestApeBearer(ClusterTestBase):
         for role, wallet in wallets_map.items():
             with reporter.step(f"Assert access to container without bearer token for {role}"):
-                assert_access_to_container(access_map[role], wallet, container, objects.pop(), file_path, self.shell, self.cluster)
+                assert_access_to_container(access_map[role], wallet, container, objects.pop(), test_file, self.shell, self.cluster)

         bearer_tokens = {}
         for role in wallets_map.keys():
@@ -191,5 +309,5 @@ class TestApeBearer(ClusterTestBase):
         for role, wallet in wallets_map.items():
             with reporter.step(f"Assert access to container with bearer token for {role}"):
                 assert_access_to_container(
-                    bt_access_map[role], wallet, container, objects.pop(), file_path, self.shell, self.cluster, bearer_tokens[role]
+                    bt_access_map[role], wallet, container, objects.pop(), test_file, self.shell, self.cluster, bearer_tokens[role]
                 )

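The docstring's point that a bearer token COMPLETELY overrides the chains set for the target (rather than merging with them) can be modelled with plain dicts; this is an illustrative sketch, not testlib code:

# Effective access under target chains vs. under a bearer token (toy model).
container_chain = {"GET": False, "PUT": False, "DELETE": False}  # deny-all on target
bearer_rules = {"GET": True, "PUT": True}                        # token allows a subset

def effective_access(op: str, with_bearer: bool) -> bool:
    if with_bearer:
        # The bearer token replaces the target's chains entirely:
        # anything the token does not explicitly allow stays denied.
        return bearer_rules.get(op, False)
    return container_chain.get(op, False)

assert effective_access("GET", with_bearer=True) is True
assert effective_access("DELETE", with_bearer=True) is False  # not merged from the target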
View file

@@ -2,16 +2,18 @@ import json
 import pytest
 from frostfs_testlib import reporter
+from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
+from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.container import search_nodes_with_container
 from frostfs_testlib.steps.cli.object import put_object_to_random_node
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.dataclasses import ape
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.storage.grpc_operations.client_wrappers import CliClientWrapper
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.parallel import parallel

-OBJECT_COUNT = 5
-
 @pytest.fixture(scope="session")
 def ir_wallet(cluster: Cluster) -> WalletInfo:
@@ -40,13 +42,22 @@ def test_wallet(default_wallet: WalletInfo, other_wallet: WalletInfo, role: ape.
     return role_to_wallet_map[role]

-@pytest.fixture
-def objects(container: str, default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, file_path: str):
+@pytest.fixture(scope="function", params=[5])
+def objects(
+    container: str,
+    default_wallet: WalletInfo,
+    client_shell: Shell,
+    cluster: Cluster,
+    test_file: str,
+    request: pytest.FixtureRequest,
+):
+    object_count = request.param
+
     with reporter.step("Add test objects to container"):
         put_results = parallel(
-            [put_object_to_random_node] * OBJECT_COUNT,
+            [put_object_to_random_node] * object_count,
             wallet=default_wallet,
-            path=file_path,
+            path=test_file,
             cid=container,
             shell=client_shell,
             cluster=cluster,
@@ -70,3 +81,18 @@ def container_nodes(default_wallet: WalletInfo, container: str, client_shell: Sh
 @pytest.fixture
 def container_node_wallet(container_nodes: list[ClusterNode]) -> WalletInfo:
     return WalletInfo.from_node(container_nodes[0].storage_node)
+
+
+@pytest.fixture
+def grpc_client_with_container_wallet(client_shell: Shell, container_node_wallet: WalletInfo) -> GrpcClientWrapper:
+    return CliClientWrapper(FrostfsCli(client_shell, FROSTFS_CLI_EXEC, container_node_wallet.config_path))
+
+
+@pytest.fixture(scope="session")
+def grpc_client_with_other_wallet(client_shell: Shell, other_wallet: WalletInfo) -> GrpcClientWrapper:
+    return CliClientWrapper(FrostfsCli(client_shell, FROSTFS_CLI_EXEC, other_wallet.config_path))
+
+
+@pytest.fixture(scope="session")
+def grpc_client_with_ir_wallet(client_shell: Shell, ir_wallet: WalletInfo) -> GrpcClientWrapper:
    return CliClientWrapper(FrostfsCli(client_shell, FROSTFS_CLI_EXEC, ir_wallet.config_path))

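The reworked objects fixture reads its object count from request.param with a default of 5, which is what lets the tests above write @pytest.mark.parametrize("objects", [4], indirect=True) to request exactly as many objects as they will .pop(). A self-contained sketch of the mechanism (toy fixture, not the testlib one):

import pytest

@pytest.fixture(params=[5])  # default object count when no override is given
def objects(request: pytest.FixtureRequest) -> list[str]:
    # In the real fixture this count drives parallel put_object_to_random_node calls.
    return [f"oid-{i}" for i in range(request.param)]

def test_default_count(objects: list[str]):
    assert len(objects) == 5

@pytest.mark.parametrize("objects", [2], indirect=True)  # override via indirect
def test_overridden_count(objects: list[str]):
    assert len(objects) == 2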
View file

@ -3,12 +3,10 @@ import pytest
from frostfs_testlib import reporter from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.resources.error_patterns import NO_RULE_FOUND_OBJECT from frostfs_testlib.resources.error_patterns import NO_RULE_FOUND_OBJECT
from frostfs_testlib.steps.cli.object import delete_object, get_object, get_range, get_range_hash, head_object, put_object, search_object from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import generate_file from frostfs_testlib.utils.file_utils import TestFile
from ...helpers.container_request import ContainerRequest from ...helpers.container_request import ContainerRequest
@ -20,12 +18,13 @@ REP1_MSK = ContainerRequest("REP 1 IN MOW CBF 1 SELECT 1 FROM MSK AS MOW FILTER
@pytest.mark.ape_object @pytest.mark.ape_object
@pytest.mark.ape_allow @pytest.mark.ape_allow
@pytest.mark.parametrize("container_request", [REP1_MSK], indirect=True) @pytest.mark.parametrize("container_request", [REP1_MSK], indirect=True)
@pytest.mark.parametrize("user_tag", ["ApeLocalOverrideAllow"], indirect=True) # provide dedicated user with no APE side-policies
class TestApeLocalOverrideAllow(ClusterTestBase): class TestApeLocalOverrideAllow(ClusterTestBase):
@allure.title("LocalOverride: Allow to GetObject in root tenant") @allure.title("LocalOverride: Allow to GetObject in root tenant")
def test_local_override_allow_to_get_object_root( def test_local_override_allow_to_get_object_root(
self, self,
default_wallet: WalletInfo,
frostfs_cli_on_first_node: FrostfsCli, frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str, container: str,
object_id: str, object_id: str,
): ):
@ -40,11 +39,11 @@ class TestApeLocalOverrideAllow(ClusterTestBase):
with reporter.step("Check get object in container on the first node, expected allow"): with reporter.step("Check get object in container on the first node, expected allow"):
with expect_not_raises(): with expect_not_raises():
get_object(default_wallet, container, object_id, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint()) grpc_client.object.get(container, object_id, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check get object in container on the second node, epxected access denied error"): with reporter.step("Check get object in container on the second node, epxected access denied error"):
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT): with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
get_object(default_wallet, container, object_id, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint()) grpc_client.object.get(container, object_id, self.cluster.storage_nodes[1].get_rpc_endpoint())
with reporter.step("Delete a rule"): with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule( frostfs_cli_on_first_node.control.remove_rule(
@ -55,15 +54,14 @@ class TestApeLocalOverrideAllow(ClusterTestBase):
) )
@allure.title("LocalOverride: Allow to PutObject in root tenant") @allure.title("LocalOverride: Allow to PutObject in root tenant")
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
def test_local_override_allow_to_put_object_root( def test_local_override_allow_to_put_object_root(
self, self,
default_wallet: WalletInfo,
frostfs_cli_on_first_node: FrostfsCli, frostfs_cli_on_first_node: FrostfsCli,
simple_object_size: ObjectSize, grpc_client: GrpcClientWrapper,
container: str, container: str,
test_file: TestFile,
): ):
test_file = generate_file(simple_object_size.value)
with reporter.step("Create local override on first node"): with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule( frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(), endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
@ -75,11 +73,11 @@ class TestApeLocalOverrideAllow(ClusterTestBase):
with reporter.step("Check put object in container on the first node, expected allow"): with reporter.step("Check put object in container on the first node, expected allow"):
with expect_not_raises(): with expect_not_raises():
put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint()) grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check get object in container on the second node, epxected access denied error"): with reporter.step("Check put object in container on the second node, epxected access denied error"):
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT): with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint()) grpc_client.object.put(test_file, container, self.cluster.storage_nodes[1].get_rpc_endpoint())
with reporter.step("Delete a rule"): with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule( frostfs_cli_on_first_node.control.remove_rule(
@ -92,8 +90,8 @@ class TestApeLocalOverrideAllow(ClusterTestBase):
@allure.title("LocalOverride: Allow to HeadObject in root tenant") @allure.title("LocalOverride: Allow to HeadObject in root tenant")
def test_local_override_allow_to_head_object_root( def test_local_override_allow_to_head_object_root(
self, self,
default_wallet: WalletInfo,
frostfs_cli_on_first_node: FrostfsCli, frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str, container: str,
object_id: str, object_id: str,
): ):
@ -108,11 +106,11 @@ class TestApeLocalOverrideAllow(ClusterTestBase):
with reporter.step("Check head object in container on the first node, expected allow"): with reporter.step("Check head object in container on the first node, expected allow"):
with expect_not_raises(): with expect_not_raises():
head_object(default_wallet, container, object_id, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint()) grpc_client.object.head(container, object_id, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check head object in container on the second node, expected access denied error"): with reporter.step("Check head object in container on the second node, expected access denied error"):
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT): with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
head_object(default_wallet, container, object_id, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint()) grpc_client.object.head(container, object_id, self.cluster.storage_nodes[1].get_rpc_endpoint())
with reporter.step("Delete a rule"): with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule( frostfs_cli_on_first_node.control.remove_rule(
@ -125,8 +123,8 @@ class TestApeLocalOverrideAllow(ClusterTestBase):
@allure.title("LocalOverride: Allow to SearchObject in root tenant") @allure.title("LocalOverride: Allow to SearchObject in root tenant")
def test_local_override_allow_to_search_object_root( def test_local_override_allow_to_search_object_root(
self, self,
default_wallet: WalletInfo,
frostfs_cli_on_first_node: FrostfsCli, frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str, container: str,
): ):
with reporter.step("Create local override on first node"): with reporter.step("Create local override on first node"):
@ -140,11 +138,11 @@ class TestApeLocalOverrideAllow(ClusterTestBase):
with reporter.step("Check search object in container on the first node, expected allow"): with reporter.step("Check search object in container on the first node, expected allow"):
with expect_not_raises(): with expect_not_raises():
search_object(default_wallet, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint()) grpc_client.object.search(container, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check search object from container on the second node, expected access denied error"): with reporter.step("Check search object from container on the second node, expected access denied error"):
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT): with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
search_object(default_wallet, container, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint()) grpc_client.object.search(container, self.cluster.storage_nodes[1].get_rpc_endpoint())
with reporter.step("Delete a rule"): with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule( frostfs_cli_on_first_node.control.remove_rule(
@ -157,8 +155,8 @@ class TestApeLocalOverrideAllow(ClusterTestBase):
@allure.title("LocalOverride: Allow to RangeObject in root tenant") @allure.title("LocalOverride: Allow to RangeObject in root tenant")
def test_local_override_allow_to_range_object_root( def test_local_override_allow_to_range_object_root(
self, self,
default_wallet: WalletInfo,
frostfs_cli_on_first_node: FrostfsCli, frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str, container: str,
object_id: str, object_id: str,
): ):
@ -173,11 +171,11 @@ class TestApeLocalOverrideAllow(ClusterTestBase):
with reporter.step("Check get range object in container on the first node, expected allow"): with reporter.step("Check get range object in container on the first node, expected allow"):
with expect_not_raises(): with expect_not_raises():
get_range(default_wallet, container, object_id, "0:10", self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint()) grpc_client.object.range(container, object_id, "0:10", self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check range object in container on the second node. expected access denied error"): with reporter.step("Check get range object in container on the second node, expected access denied error"):
             with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
-                get_range(default_wallet, container, object_id, "0:10", self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
+                grpc_client.object.range(container, object_id, "0:10", self.cluster.storage_nodes[1].get_rpc_endpoint())

         with reporter.step("Delete a rule"):
             frostfs_cli_on_first_node.control.remove_rule(
@@ -190,8 +188,8 @@ class TestApeLocalOverrideAllow(ClusterTestBase):
     @allure.title("LocalOverride: Allow to HashObject in root tenant")
     def test_local_override_allow_to_hash_object_root(
         self,
-        default_wallet: WalletInfo,
         frostfs_cli_on_first_node: FrostfsCli,
+        grpc_client: GrpcClientWrapper,
         container: str,
         object_id: str,
     ):
@@ -206,11 +204,11 @@ class TestApeLocalOverrideAllow(ClusterTestBase):
         with reporter.step("Check get range hash object in container on the first node, expected allow"):
             with expect_not_raises():
-                get_range_hash(default_wallet, container, object_id, "0:10", self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+                grpc_client.object.hash(self.cluster.storage_nodes[0].get_rpc_endpoint(), container, object_id, range="0:10")

         with reporter.step("Check get range hash object in container on the second node, expected access denied error"):
             with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
-                get_range_hash(default_wallet, container, object_id, "0:10", self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
+                grpc_client.object.hash(self.cluster.storage_nodes[1].get_rpc_endpoint(), container, object_id, range="0:10")

         with reporter.step("Delete a rule"):
             frostfs_cli_on_first_node.control.remove_rule(
@@ -223,8 +221,8 @@ class TestApeLocalOverrideAllow(ClusterTestBase):
     @allure.title("LocalOverride: Allow to DeleteObject in root tenant")
     def test_local_override_allow_to_delete_object_root(
         self,
-        default_wallet: WalletInfo,
         frostfs_cli_on_first_node: FrostfsCli,
+        grpc_client: GrpcClientWrapper,
         container: str,
         object_id: str,
     ):
@@ -239,11 +237,11 @@ class TestApeLocalOverrideAllow(ClusterTestBase):
         with reporter.step("Check delete object from container on the second node, expected access denied error"):
             with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
-                delete_object(default_wallet, container, object_id, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
+                grpc_client.object.delete(container, object_id, self.cluster.storage_nodes[1].get_rpc_endpoint())

         with reporter.step("Check delete object in container on the first node, expected allow"):
             with expect_not_raises():
-                delete_object(default_wallet, container, object_id, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+                grpc_client.object.delete(container, object_id, self.cluster.storage_nodes[0].get_rpc_endpoint())

         with reporter.step("Delete a rule"):
             frostfs_cli_on_first_node.control.remove_rule(
@@ -252,3 +250,55 @@ class TestApeLocalOverrideAllow(ClusterTestBase):
                 target_name=container,
                 chain_id="allowDeleteObject",
             )
@allure.title("LocalOverride: Allow to PatchObject in root tenant")
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
def test_local_override_allow_to_patch_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str,
object_id: str,
test_file: TestFile,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowPatchObject",
rule=f"allow Object.Patch *",
)
with reporter.step("Check patch object in container on the second node, epxected access denied error"):
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
grpc_client.object.patch(
container,
object_id,
self.cluster.storage_nodes[1].get_rpc_endpoint(),
ranges=["500:300"],
payloads=[test_file],
new_attrs="patched=false",
timeout="200s",
)
with reporter.step("Check patch object in container on the first node, expected allow"):
with expect_not_raises():
patched_oid = grpc_client.object.patch(
container,
object_id,
self.cluster.storage_nodes[0].get_rpc_endpoint(),
ranges=["100:200"],
payloads=[test_file],
new_attrs="patched=true",
timeout="200s",
)
assert patched_oid != object_id, "OID of patched object must be different from original one"
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowPatchObject",
)
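
Every suite in this file follows the same add-rule / exercise / remove-rule shape that the hunks above migrate onto GrpcClientWrapper. A condensed sketch of the pattern (fixture names and wrapper signatures are taken from the diff itself; the try/finally cleanup is an illustrative variation, not the code as committed):

import pytest

from frostfs_testlib.resources.error_patterns import NO_RULE_FOUND_OBJECT  # import path assumed from the sibling deny suite
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase


class TestLocalOverrideSketch(ClusterTestBase):
    def test_allow_rule_is_node_local(self, frostfs_cli_on_first_node, grpc_client, container, object_id):
        node_with_rule = self.cluster.storage_nodes[0]
        node_without_rule = self.cluster.storage_nodes[1]

        # Install the override through the control endpoint of one node only.
        frostfs_cli_on_first_node.control.add_rule(
            endpoint=node_with_rule.get_control_endpoint(),
            target_type="container",
            target_name=container,
            chain_id="allowGetObject",
            rule="allow Object.Get *",
        )
        try:
            # The node holding the rule serves the request...
            grpc_client.object.get(container, object_id, node_with_rule.get_rpc_endpoint())
            # ...while its peer, which never saw the rule, rejects it.
            with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
                grpc_client.object.get(container, object_id, node_without_rule.get_rpc_endpoint())
        finally:
            # Clean up so later tests start from an override-free node.
            frostfs_cli_on_first_node.control.remove_rule(
                endpoint=node_with_rule.get_control_endpoint(),
                target_type="container",
                target_name=container,
                chain_id="allowGetObject",
            )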

View file

@@ -1,20 +1,15 @@
 import allure
 import pytest
+from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
-from frostfs_testlib.reporter import get_reporter
-from frostfs_testlib.resources.error_patterns import OBJECT_ACCESS_DENIED, RULE_ACCESS_DENIED_OBJECT
-from frostfs_testlib.steps.cli.object import delete_object, get_object, get_range, get_range_hash, head_object, put_object, search_object
-from frostfs_testlib.storage.dataclasses.ape import Operations
-from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
-from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.resources.error_patterns import RULE_ACCESS_DENIED_OBJECT
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.test_control import expect_not_raises
-from frostfs_testlib.utils.file_utils import generate_file
+from frostfs_testlib.utils.file_utils import TestFile

 from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest

-reporter = get_reporter()
-
 REP2 = ContainerRequest("REP 2", ape_rules=APE_EVERYONE_ALLOW_ALL, short_name="REP2_allow_all_ape")
@@ -25,15 +20,14 @@ REP2 = ContainerRequest("REP 2", ape_rules=APE_EVERYONE_ALLOW_ALL, short_name="REP2_allow_all_ape")
 class TestApeLocalOverrideDeny(ClusterTestBase):
     @allure.title("LocalOverride: Deny to GetObject in root tenant")
     @pytest.mark.parametrize("container_request", [REP2], indirect=True)
+    @pytest.mark.parametrize("object_size", ["simple"], indirect=True)
     def test_local_override_deny_to_get_object_root(
         self,
-        default_wallet: WalletInfo,
         frostfs_cli_on_first_node: FrostfsCli,
-        simple_object_size: ObjectSize,
+        grpc_client: GrpcClientWrapper,
         container: str,
+        test_file: TestFile,
     ):
-        test_file = generate_file(simple_object_size.value)
         with reporter.step("Create local override on first node"):
             frostfs_cli_on_first_node.control.add_rule(
                 endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
@@ -44,15 +38,15 @@ class TestApeLocalOverrideDeny(ClusterTestBase):
             )

         with reporter.step("Put object in container on the first node"):
-            oid = put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+            oid = grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())

         with reporter.step("Check get object from container on the first node, expected access denied error"):
             with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
-                get_object(default_wallet, container, oid, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+                grpc_client.object.get(container, oid, self.cluster.storage_nodes[0].get_rpc_endpoint())

         with reporter.step("Check get object from container on the second node, expected allow"):
             with expect_not_raises():
-                get_object(default_wallet, container, oid, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
+                grpc_client.object.get(container, oid, self.cluster.storage_nodes[1].get_rpc_endpoint())

         with reporter.step("Delete a rule"):
             frostfs_cli_on_first_node.control.remove_rule(
@@ -64,19 +58,18 @@ class TestApeLocalOverrideDeny(ClusterTestBase):
         with reporter.step("Check get object in container on the first node, expected allow"):
             with expect_not_raises():
-                get_object(default_wallet, container, oid, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+                grpc_client.object.get(container, oid, self.cluster.storage_nodes[0].get_rpc_endpoint())

     @allure.title("LocalOverride: Deny to PutObject in root tenant")
     @pytest.mark.parametrize("container_request", [REP2], indirect=True)
+    @pytest.mark.parametrize("object_size", ["simple"], indirect=True)
     def test_local_override_deny_to_put_object_root(
         self,
-        default_wallet: WalletInfo,
         frostfs_cli_on_first_node: FrostfsCli,
-        simple_object_size: ObjectSize,
+        grpc_client: GrpcClientWrapper,
         container: str,
+        test_file: TestFile,
     ):
-        test_file = generate_file(simple_object_size.value)
         with reporter.step("Create local override on first node"):
             frostfs_cli_on_first_node.control.add_rule(
                 endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
@@ -87,14 +80,12 @@ class TestApeLocalOverrideDeny(ClusterTestBase):
             )

         with reporter.step("Check put object from container on the first node, expected access denied error"):
-            with pytest.raises(RuntimeError, match=OBJECT_ACCESS_DENIED):
-                put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+            with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
+                grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())

         with reporter.step("Check put object from container on the second node, expected allow"):
             with expect_not_raises():
-                put_object(
-                    default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint(), copies_number=3
-                )
+                grpc_client.object.put(test_file, container, self.cluster.storage_nodes[1].get_rpc_endpoint())

         with reporter.step("Delete a rule"):
             frostfs_cli_on_first_node.control.remove_rule(
@@ -106,19 +97,18 @@ class TestApeLocalOverrideDeny(ClusterTestBase):
         with reporter.step("Check get object in container on the first node, expected allow"):
             with expect_not_raises():
-                put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+                grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())

     @allure.title("LocalOverride: Deny to HeadObject in root tenant")
     @pytest.mark.parametrize("container_request", [REP2], indirect=True)
+    @pytest.mark.parametrize("object_size", ["simple"], indirect=True)
     def test_local_override_deny_to_head_object_root(
         self,
-        default_wallet: WalletInfo,
         frostfs_cli_on_first_node: FrostfsCli,
-        simple_object_size: ObjectSize,
+        grpc_client: GrpcClientWrapper,
         container: str,
+        test_file: TestFile,
     ):
-        test_file = generate_file(simple_object_size.value)
         with reporter.step("Create local override on first node"):
             frostfs_cli_on_first_node.control.add_rule(
                 endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
@@ -129,15 +119,15 @@ class TestApeLocalOverrideDeny(ClusterTestBase):
             )

         with reporter.step("Put object in container on the first node"):
-            oid = put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+            oid = grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())

         with reporter.step("Check head object from container on the first node, expected access denied error"):
             with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
-                head_object(default_wallet, container, oid, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+                grpc_client.object.head(container, oid, self.cluster.storage_nodes[0].get_rpc_endpoint())

         with reporter.step("Check head object from container on the second node, expected allow"):
             with expect_not_raises():
-                head_object(default_wallet, container, oid, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
+                grpc_client.object.head(container, oid, self.cluster.storage_nodes[1].get_rpc_endpoint())

         with reporter.step("Delete a rule"):
             frostfs_cli_on_first_node.control.remove_rule(
@@ -149,14 +139,14 @@ class TestApeLocalOverrideDeny(ClusterTestBase):
         with reporter.step("Check head object in container on the first node, expected allow"):
             with expect_not_raises():
-                head_object(default_wallet, container, oid, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+                grpc_client.object.head(container, oid, self.cluster.storage_nodes[0].get_rpc_endpoint())

     @allure.title("LocalOverride: Deny to SearchObject in root tenant")
     @pytest.mark.parametrize("container_request", [REP2], indirect=True)
     def test_local_override_deny_to_search_object_root(
         self,
-        default_wallet: WalletInfo,
         frostfs_cli_on_first_node: FrostfsCli,
+        grpc_client: GrpcClientWrapper,
         container: str,
     ):
         with reporter.step("Create local override on first node"):
@@ -169,12 +159,12 @@ class TestApeLocalOverrideDeny(ClusterTestBase):
             )

         with reporter.step("Check search object from container on the first node, expected access denied error"):
-            with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT.format(operation=Operations.SEARCH_OBJECT)):
-                search_object(default_wallet, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+            with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
+                grpc_client.object.search(container, self.cluster.storage_nodes[0].get_rpc_endpoint())

         with reporter.step("Check search object from container on the second node, expected allow"):
             with expect_not_raises():
-                search_object(default_wallet, container, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
+                grpc_client.object.search(container, self.cluster.storage_nodes[1].get_rpc_endpoint())

         with reporter.step("Delete a rule"):
             frostfs_cli_on_first_node.control.remove_rule(
@@ -186,19 +176,18 @@ class TestApeLocalOverrideDeny(ClusterTestBase):
         with reporter.step("Check search object in container on the first node, expected allow"):
             with expect_not_raises():
-                search_object(default_wallet, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+                grpc_client.object.search(container, self.cluster.storage_nodes[0].get_rpc_endpoint())

     @allure.title("LocalOverride: Deny to RangeObject in root tenant")
     @pytest.mark.parametrize("container_request", [REP2], indirect=True)
+    @pytest.mark.parametrize("object_size", ["simple"], indirect=True)
     def test_local_override_deny_to_range_object_root(
         self,
-        default_wallet: WalletInfo,
         frostfs_cli_on_first_node: FrostfsCli,
-        simple_object_size: ObjectSize,
+        grpc_client: GrpcClientWrapper,
         container: str,
+        test_file: TestFile,
     ):
-        test_file = generate_file(simple_object_size.value)
         with reporter.step("Create local override on first node"):
             frostfs_cli_on_first_node.control.add_rule(
                 endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
@@ -209,15 +198,15 @@ class TestApeLocalOverrideDeny(ClusterTestBase):
             )

         with reporter.step("Put object in container on the first node"):
-            oid = put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+            oid = grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())

         with reporter.step("Check range object from container on the first node, expected access denied error"):
-            with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT.format(operation=Operations.RANGE_OBJECT)):
-                get_range(default_wallet, container, oid, "0:10", self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+            with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
+                grpc_client.object.range(container, oid, "0:10", self.cluster.storage_nodes[0].get_rpc_endpoint())

         with reporter.step("Check get range object from container on the second node, expected allow"):
             with expect_not_raises():
-                get_range(default_wallet, container, oid, "0:10", self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
+                grpc_client.object.range(container, oid, "0:10", self.cluster.storage_nodes[1].get_rpc_endpoint())

         with reporter.step("Delete a rule"):
             frostfs_cli_on_first_node.control.remove_rule(
@@ -229,19 +218,18 @@ class TestApeLocalOverrideDeny(ClusterTestBase):
         with reporter.step("Check get range object in container on the first node, expected allow"):
             with expect_not_raises():
-                get_range(default_wallet, container, oid, "0:10", self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+                grpc_client.object.range(container, oid, "0:10", self.cluster.storage_nodes[0].get_rpc_endpoint())

     @allure.title("LocalOverride: Deny to HashObject in root tenant")
     @pytest.mark.parametrize("container_request", [REP2], indirect=True)
+    @pytest.mark.parametrize("object_size", ["simple"], indirect=True)
     def test_local_override_deny_to_hash_object_root(
         self,
-        default_wallet: WalletInfo,
         frostfs_cli_on_first_node: FrostfsCli,
-        simple_object_size: ObjectSize,
+        grpc_client: GrpcClientWrapper,
         container: str,
+        test_file: TestFile,
     ):
-        test_file = generate_file(simple_object_size.value)
         with reporter.step("Create local override on first node"):
             frostfs_cli_on_first_node.control.add_rule(
                 endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
@@ -252,15 +240,15 @@ class TestApeLocalOverrideDeny(ClusterTestBase):
             )

         with reporter.step("Put object in container on the first node"):
-            oid = put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+            oid = grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())

         with reporter.step("Check get range hash object from container on the first node, expected access denied error"):
-            with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT.format(operation=Operations.HASH_OBJECT)):
-                get_range_hash(default_wallet, container, oid, "0:10", self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+            with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
+                grpc_client.object.hash(self.cluster.storage_nodes[0].get_rpc_endpoint(), container, oid, range="0:10")

         with reporter.step("Check get range hash object from container on the second node, expected allow"):
             with expect_not_raises():
-                get_range_hash(default_wallet, container, oid, "0:10", self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
+                grpc_client.object.hash(self.cluster.storage_nodes[1].get_rpc_endpoint(), container, oid, range="0:10")

         with reporter.step("Delete a rule"):
             frostfs_cli_on_first_node.control.remove_rule(
@@ -272,19 +260,18 @@ class TestApeLocalOverrideDeny(ClusterTestBase):
         with reporter.step("Check get range hash object in container on the first node, expected allow"):
             with expect_not_raises():
-                get_range_hash(default_wallet, container, oid, "0:10", self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+                grpc_client.object.hash(self.cluster.storage_nodes[0].get_rpc_endpoint(), container, oid, range="0:10")

     @allure.title("LocalOverride: Deny to DeleteObject in root tenant")
     @pytest.mark.parametrize("container_request", [REP2], indirect=True)
+    @pytest.mark.parametrize("object_size", ["simple"], indirect=True)
     def test_local_override_deny_to_delete_object_root(
         self,
-        default_wallet: WalletInfo,
         frostfs_cli_on_first_node: FrostfsCli,
-        simple_object_size: ObjectSize,
+        grpc_client: GrpcClientWrapper,
         container: str,
+        test_file: TestFile,
     ):
-        test_file = generate_file(simple_object_size.value)
         with reporter.step("Create local override on first node"):
             frostfs_cli_on_first_node.control.add_rule(
                 endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
@@ -295,30 +282,26 @@ class TestApeLocalOverrideDeny(ClusterTestBase):
             )

         with reporter.step("Put objects in container on the first node"):
-            oid_1 = put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
-            oid_2 = put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+            oid_1 = grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())
+            oid_2 = grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())

         with reporter.step("Search object in container on the first node"):
-            search_object_in_container_1 = search_object(
-                default_wallet, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint()
-            )
+            search_object_in_container_1 = grpc_client.object.search(container, self.cluster.storage_nodes[0].get_rpc_endpoint())
             assert oid_1 in search_object_in_container_1, f"Object {oid_1} was not found"
             assert oid_2 in search_object_in_container_1, f"Object {oid_2} was not found"

         with reporter.step("Search object from container on the second node"):
-            search_object_in_container_2 = search_object(
-                default_wallet, container, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint()
-            )
+            search_object_in_container_2 = grpc_client.object.search(container, self.cluster.storage_nodes[1].get_rpc_endpoint())
             assert oid_1 in search_object_in_container_2, f"Object {oid_1} was not found"
             assert oid_2 in search_object_in_container_2, f"Object {oid_2} was not found"

         with reporter.step("Check delete object from container on the first node, expected access denied error"):
             with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
-                delete_object(default_wallet, container, oid_1, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+                grpc_client.object.delete(container, oid_1, self.cluster.storage_nodes[0].get_rpc_endpoint())

         with reporter.step("Check delete object from container on the second node, expected allow"):
             with expect_not_raises():
-                delete_object(default_wallet, container, oid_2, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
+                grpc_client.object.delete(container, oid_2, self.cluster.storage_nodes[1].get_rpc_endpoint())

         with reporter.step("Delete a rule"):
             frostfs_cli_on_first_node.control.remove_rule(
@@ -330,4 +313,70 @@ class TestApeLocalOverrideDeny(ClusterTestBase):
         with reporter.step("Check delete object in container on the first node, expected allow"):
             with expect_not_raises():
-                delete_object(default_wallet, container, oid_1, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
+                grpc_client.object.delete(container, oid_1, self.cluster.storage_nodes[0].get_rpc_endpoint())
@allure.title("LocalOverride: Deny to PatchObject in root tenant")
@pytest.mark.parametrize("container_request", [REP2], indirect=True)
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
def test_local_override_deny_to_patch_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
test_file: TestFile,
container: str,
object_id: str,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denyPatchObject",
rule=f"deny Object.Patch /{container}/*",
)
with reporter.step("Check patch object from container on the first node, expected access denied error"):
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
grpc_client.object.patch(
container,
object_id,
self.cluster.storage_nodes[0].get_rpc_endpoint(),
ranges=["0:350"],
payloads=[test_file],
new_attrs="patched_by_first_node=false",
timeout="200s",
)
with reporter.step("Check patch object from container on the second node, expected allow"):
with expect_not_raises():
patched_oid_1 = grpc_client.object.patch(
container,
object_id,
self.cluster.storage_nodes[1].get_rpc_endpoint(),
ranges=["200:400"],
payloads=[test_file],
new_attrs="patched_by_second_node=true",
timeout="200s",
)
assert patched_oid_1 != object_id, "OID of patched object must be different from original one"
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denyPatchObject",
)
with reporter.step("Check patch object in container on the first node, expected allow"):
with expect_not_raises():
patched_oid_2 = grpc_client.object.patch(
container,
patched_oid_1,
self.cluster.storage_nodes[0].get_rpc_endpoint(),
ranges=["600:0"],
payloads=[test_file],
new_attrs="patched_by_first_node=true",
timeout="200s",
)
assert patched_oid_1 != patched_oid_2, "OID of patched object must be different from original one"
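
The `ranges` values above use the CLI's `offset:length` notation. Assuming a patch splices the supplied payload over that byte range and stores the result as a new object (which is why every assertion above expects a fresh OID), the payload transformation can be sketched in plain Python; the names here are illustrative, not part of the wrapper API:

def apply_patch(original: bytes, patch_payload: bytes, offset: int, length: int) -> bytes:
    # Bytes [offset, offset + length) are replaced by the whole patch payload;
    # a range such as "600:0" therefore inserts at offset 600 without removing anything.
    return original[:offset] + patch_payload + original[offset + length:]

# ranges=["200:400"] with a 400-byte payload keeps the object size unchanged:
assert len(apply_patch(b"x" * 1000, b"y" * 400, 200, 400)) == 1000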

View file

@@ -1,36 +1,34 @@
 import logging
 import random
-from datetime import datetime, timedelta, timezone
+from datetime import datetime
 from typing import Optional

 import allure
 import pytest
-from dateutil import parser
 from frostfs_testlib import plugins, reporter
 from frostfs_testlib.cli import FrostfsCli
+from frostfs_testlib.clients import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, S3HttpClient
+from frostfs_testlib.clients.s3 import BucketContainerResolver, VersioningStatus
 from frostfs_testlib.credentials.interfaces import CredentialsProvider, User
 from frostfs_testlib.healthcheck.interfaces import Healthcheck
 from frostfs_testlib.hosting import Hosting
 from frostfs_testlib.resources import optionals
 from frostfs_testlib.resources.common import COMPLEX_OBJECT_CHUNKS_COUNT, COMPLEX_OBJECT_TAIL_SIZE, SIMPLE_OBJECT_SIZE
-from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
-from frostfs_testlib.s3.interfaces import BucketContainerResolver
 from frostfs_testlib.shell import LocalShell, Shell
-from frostfs_testlib.steps import s3_helper
 from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE, FROSTFS_CLI_EXEC
 from frostfs_testlib.steps.cli.object import get_netmap_netinfo
 from frostfs_testlib.steps.epoch import ensure_fresh_epoch
+from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
-from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.storage.grpc_operations.client_wrappers import CliClientWrapper
-from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.parallel import parallel
-from frostfs_testlib.testing.test_control import cached_fixture, run_optionally, wait_for_success
+from frostfs_testlib.testing.test_control import cached_fixture, run_optionally
 from frostfs_testlib.utils import env_utils, string_utils, version_utils
 from frostfs_testlib.utils.file_utils import TestFile, generate_file
@@ -40,7 +38,6 @@ from ..resources.common import TEST_CYCLES_COUNT
 logger = logging.getLogger("NeoLogger")

-SERVICE_ACTIVE_TIME = 20
 WALLTETS_IN_POOL = 2
@@ -56,20 +53,6 @@ start_time = pytest.StashKey[int]()
 test_outcome = pytest.StashKey[str]()

-# pytest hook. Do not rename
-def pytest_collection_modifyitems(items: list[pytest.Item]):
-    # Change order of tests based on @pytest.mark.order(<int>) marker
-    def order(item: pytest.Item) -> int:
-        order_marker = item.get_closest_marker("order")
-        if order_marker and (len(order_marker.args) != 1 or not isinstance(order_marker.args[0], int)):
-            raise RuntimeError("Incorrect usage of pytest.mark.order")
-
-        order_value = order_marker.args[0] if order_marker else 0
-        return order_value
-
-    items.sort(key=lambda item: order(item))
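
The removed hook sorted collected items by an integer `@pytest.mark.order(<n>)` marker, defaulting to 0, which is how the metrics suites were scheduled ahead of everything else; its removal here suggests the sorting now lives in frostfs-testlib (an assumption based on the "Move common fixture to testlib" change). Marker usage stays the same, e.g. (illustrative test):

import pytest

@pytest.mark.order(-10)  # sorts before all unmarked tests, whose order defaults to 0
def test_metrics_run_first():
    ...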
 # pytest hook. Do not rename
 def pytest_collection_finish(session: pytest.Session):
     items_total = len(session.items)
@@ -169,7 +152,11 @@ def complex_object_size(max_object_size: int) -> ObjectSize:
 # By default we want all tests to be executed with both object sizes
 # This can be overriden in choosen tests if needed
 @pytest.fixture(
-    scope="session", params=[pytest.param("simple", marks=pytest.mark.simple), pytest.param("complex", marks=pytest.mark.complex)]
+    scope="session",
+    params=[
+        pytest.param("simple", marks=[pytest.mark.simple, pytest.mark.exclude_sanity]),
+        pytest.param("complex", marks=pytest.mark.complex),
+    ],
 )
 def object_size(simple_object_size: ObjectSize, complex_object_size: ObjectSize, request: pytest.FixtureRequest) -> ObjectSize:
     if request.param == "simple":
@@ -206,8 +193,25 @@ def ec_placement_policy() -> PlacementPolicy:
 @pytest.fixture(scope="session")
 @allure.title("Init Frostfs CLI")
-def frostfs_cli(client_shell: Shell, default_wallet: WalletInfo) -> FrostfsCli:
-    return FrostfsCli(client_shell, FROSTFS_CLI_EXEC, default_wallet.config_path)
+def frostfs_cli(client_shell: Shell, wallet: WalletInfo) -> FrostfsCli:
+    return FrostfsCli(client_shell, FROSTFS_CLI_EXEC, wallet.config_path)
+
+
+@pytest.fixture(scope="session")
+@allure.title("Init Frostfs CLI remote")
+def remote_frostfs_cli(cluster: Cluster) -> FrostfsCli:
+    node = cluster.cluster_nodes[0]
+    host = node.host
+    service_config = host.get_service_config(node.storage_node.name)
+    wallet_path = service_config.attributes["wallet_path"]
+    wallet_password = service_config.attributes["wallet_password"]
+
+    shell = host.get_shell()
+    wallet_config_path = f"/tmp/{node.storage_node.name}-config.yaml"
+    wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
+    shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
+
+    cli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path)
+    return cli


 @pytest.fixture(scope="session")
@@ -216,6 +220,12 @@ def grpc_client(frostfs_cli: FrostfsCli) -> GrpcClientWrapper:
     return CliClientWrapper(frostfs_cli)

+
+@pytest.fixture(scope="session")
+@allure.title("Init GrpcClientWrapper with remote Frostfs CLI")
+def remote_grpc_client(remote_frostfs_cli: FrostfsCli) -> GrpcClientWrapper:
+    return CliClientWrapper(remote_frostfs_cli)
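
A hypothetical sketch of how a test might consume the remote wrapper: the API surface is the same as `grpc_client`, the difference being that commands execute in a shell on the storage node itself under that node's own wallet, which is useful when exercising node-internal interfaces (test name and call are illustrative):

def test_head_via_node_local_cli(remote_grpc_client: GrpcClientWrapper, cluster: Cluster, container: str, object_id: str):
    # Served by the node's own CLI and wallet rather than the test host's.
    remote_grpc_client.object.head(container, object_id, cluster.default_rpc_endpoint)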
 # By default we want all tests to be executed with both storage policies.
 # This can be overriden in choosen tests if needed.
 @pytest.fixture(scope="session", params=[pytest.param("rep", marks=pytest.mark.rep), pytest.param("ec", marks=pytest.mark.ec)])
@@ -282,25 +292,39 @@ def credentials_provider(cluster: Cluster) -> CredentialsProvider:
 @pytest.fixture(
     scope="session",
     params=[
-        pytest.param(AwsCliClient, marks=[pytest.mark.aws, pytest.mark.weekly]),
+        pytest.param(AwsCliClient, marks=[pytest.mark.aws, pytest.mark.weekly, pytest.mark.exclude_sanity]),
         pytest.param(Boto3ClientWrapper, marks=[pytest.mark.boto3, pytest.mark.nightly]),
     ],
 )
 def s3_client(
-    default_user: User,
+    user: User,
     s3_policy: Optional[str],
     cluster: Cluster,
     request: pytest.FixtureRequest,
     credentials_provider: CredentialsProvider,
 ) -> S3ClientWrapper:
     node = cluster.cluster_nodes[0]
-    credentials_provider.S3.provide(default_user, node, s3_policy)
+    credentials_provider.S3.provide(user, node, s3_policy)

     s3_client_cls = request.param
-    client = s3_client_cls(default_user.s3_credentials.access_key, default_user.s3_credentials.secret_key, cluster.default_s3_gate_endpoint)
+    client = s3_client_cls(user.s3_credentials.access_key, user.s3_credentials.secret_key, cluster.default_s3_gate_endpoint)
     return client

+
+@allure.title("[Session] Create S3 http client")
+@pytest.fixture(scope="session")
+def s3_http_client(
+    default_user: User, s3_policy: Optional[str], cluster: Cluster, credentials_provider: CredentialsProvider
+) -> S3HttpClient:
+    node = cluster.cluster_nodes[0]
+    credentials_provider.S3.provide(default_user, node, s3_policy)
+    return S3HttpClient(
+        cluster.default_s3_gate_endpoint,
+        default_user.s3_credentials.access_key,
+        default_user.s3_credentials.secret_key,
+    )
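
Unlike the SDK-backed `s3_client`, `s3_http_client` talks to the gateway over plain HTTP, letting tests manipulate requests below the SDK layer. What such a client does under the hood can be sketched with botocore's signer; this illustrates SigV4-signed raw access under assumed endpoint and region values, not the actual S3HttpClient internals:

import requests
from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest
from botocore.credentials import Credentials

def signed_get(endpoint: str, access_key: str, secret_key: str, path: str) -> requests.Response:
    # Build a raw request and sign it the way an S3 SDK would.
    request = AWSRequest(method="GET", url=f"{endpoint}/{path}")
    SigV4Auth(Credentials(access_key, secret_key), "s3", "us-east-1").add_auth(request)
    return requests.get(request.url, headers=dict(request.headers))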
 @pytest.fixture
 def versioning_status(request: pytest.FixtureRequest) -> VersioningStatus:
     if "param" in request.__dict__:
@@ -386,51 +410,11 @@ def collect_binary_versions(hosting: Hosting, client_shell: Shell, request: pytest.FixtureRequest):
     env_utils.save_env_properties(file_path, all_versions)

-
-@reporter.step("[Autouse/Session] Test session start time")
-@pytest.fixture(scope="session", autouse=True)
-def session_start_time(configure_testlib):
-    start_time = datetime.utcnow()
-    return start_time
-
-
-@allure.title("[Autouse/Session] After deploy healthcheck")
-@pytest.fixture(scope="session", autouse=True)
-@run_optionally(optionals.OPTIONAL_AUTOUSE_FIXTURES_ENABLED)
-def after_deploy_healthcheck(cluster: Cluster):
-    with reporter.step("Wait for cluster readiness after deploy"):
-        parallel(readiness_on_node, cluster.cluster_nodes)
-

 @pytest.fixture(scope="session")
 def rpc_endpoint(cluster: Cluster):
     return cluster.default_rpc_endpoint

-
-@wait_for_success(60 * SERVICE_ACTIVE_TIME * 3, 60, title="Wait for {cluster_node} readiness")
-def readiness_on_node(cluster_node: ClusterNode):
-    if "skip_readiness_check" in cluster_node.host.config.attributes and cluster_node.host.config.attributes["skip_readiness_check"]:
-        return
-
-    # TODO: Move to healtcheck classes
-    svc_name = cluster_node.service(StorageNode).get_service_systemctl_name()
-    with reporter.step(f"Check service {svc_name} is active"):
-        result = cluster_node.host.get_shell().exec(f"systemctl is-active {svc_name}")
-        assert "active" == result.stdout.strip(), f"Service {svc_name} should be in active state"
-
-    with reporter.step(f"Check service {svc_name} is active more than {SERVICE_ACTIVE_TIME} minutes"):
-        result = cluster_node.host.get_shell().exec(f"systemctl show {svc_name} --property ActiveEnterTimestamp | cut -d '=' -f 2")
-        start_time = parser.parse(result.stdout.strip())
-        current_time = datetime.now(tz=timezone.utc)
-        active_time = current_time - start_time
-
-        active_minutes = active_time.seconds // 60
-        active_seconds = active_time.seconds - active_minutes * 60
-
-        assert active_time > timedelta(
-            minutes=SERVICE_ACTIVE_TIME
-        ), f"Service should be in active state more than {SERVICE_ACTIVE_TIME} minutes, current {active_minutes}m:{active_seconds}s"
-

 @reporter.step("Prepare default user with wallet")
 @pytest.fixture(scope="session")
 @cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
@@ -443,19 +427,51 @@ def default_user(credentials_provider: CredentialsProvider, cluster: Cluster) -> User:
     return user

-
-@reporter.step("Get wallet for default user")
 @pytest.fixture(scope="session")
-def default_wallet(default_user: User) -> WalletInfo:
-    return default_user.wallet
+@cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
+def users_pool(credentials_provider: CredentialsProvider, cluster: Cluster) -> list[User]:
+    users = [User(string_utils.unique_name("user-")) for _ in range(WALLTETS_IN_POOL)]
+    parallel(credentials_provider.GRPC.provide, users, cluster_node=cluster.cluster_nodes[0])
+    return users
+
+
+@pytest.fixture(scope="session")
+def user_tag(request: pytest.FixtureRequest) -> str:
+    tag = "default"
+    if "param" in request.__dict__:
+        tag = request.param
+
+    return tag


 @pytest.fixture(scope="session")
 @cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
-def wallets_pool(credentials_provider: CredentialsProvider, cluster: Cluster) -> list[WalletInfo]:
-    users = [User(string_utils.unique_name("user-")) for _ in range(WALLTETS_IN_POOL)]
-    parallel(credentials_provider.GRPC.provide, users, cluster_node=cluster.cluster_nodes[0])
-
-    return [user.wallet for user in users]
+@reporter.step("Create {user_tag} user")
+def user(user_tag: str) -> User:
+    user = User(string_utils.unique_name("user-"))
+    user.attributes["tag"] = user_tag
+
+    return user
+
+
+@pytest.fixture(scope="session")
+@cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
+def wallet(user: User, credentials_provider: CredentialsProvider, cluster: Cluster) -> WalletInfo:
+    credentials_provider.GRPC.provide(user, cluster.cluster_nodes[0])
+    return user.wallet
+
+
+# TODO: Migrate tests to fixture wallet above
+@reporter.step("Get wallet for default user")
+@pytest.fixture(scope="session")
+def default_wallet(wallet: WalletInfo) -> WalletInfo:
+    return wallet
+
+
+@pytest.fixture(scope="session")
+@cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
+def wallets_pool(users_pool: list[User]) -> list[WalletInfo]:
+    return [user.wallet for user in users_pool]
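
With this chain (`user_tag` → `user` → `wallet` → `default_wallet`), a suite can request a differently tagged session user by overriding `user_tag` through indirect parametrization, e.g. (hypothetical test):

@pytest.mark.parametrize("user_tag", ["restricted"], indirect=True)
def test_with_restricted_user(wallet: WalletInfo):
    # `wallet` now belongs to a fresh user whose attributes["tag"] == "restricted".
    ...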
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
@ -524,26 +540,26 @@ def multiple_containers_request(request: pytest.FixtureRequest) -> ContainerRequ
@pytest.fixture @pytest.fixture
def container( def container(
default_wallet: WalletInfo, wallet: WalletInfo,
frostfs_cli: FrostfsCli, frostfs_cli: FrostfsCli,
client_shell: Shell, client_shell: Shell,
cluster: Cluster, cluster: Cluster,
rpc_endpoint: str, rpc_endpoint: str,
container_request: ContainerRequest, container_request: ContainerRequest,
) -> str: ) -> str:
return create_container_with_ape(container_request, frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint) return create_container_with_ape(container_request, frostfs_cli, wallet, client_shell, cluster, rpc_endpoint)
@pytest.fixture @pytest.fixture
def containers( def containers(
default_wallet: WalletInfo, wallet: WalletInfo,
frostfs_cli: FrostfsCli, frostfs_cli: FrostfsCli,
client_shell: Shell, client_shell: Shell,
cluster: Cluster, cluster: Cluster,
rpc_endpoint: str, rpc_endpoint: str,
multiple_containers_request: MultipleContainersRequest, multiple_containers_request: MultipleContainersRequest,
) -> list[str]: ) -> list[str]:
return create_containers_with_ape(frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint, multiple_containers_request) return create_containers_with_ape(frostfs_cli, wallet, client_shell, cluster, rpc_endpoint, multiple_containers_request)
@pytest.fixture() @pytest.fixture()

View file

@@ -59,7 +59,7 @@ class TestContainer(ClusterTestBase):
         self.tick_epoch()
         wait_for_container_deletion(wallet, cid, self.shell, rpc_endpoint)

-    @allure.title("Delete container without force (name={name})")
+    @allure.title("Delete container without force")
     @pytest.mark.smoke
     def test_container_deletion_no_force(self, container: str, default_wallet: WalletInfo, rpc_endpoint: str):
         with reporter.step("Delete container and check it was deleted"):
@@ -68,6 +68,7 @@ class TestContainer(ClusterTestBase):
             wait_for_container_deletion(default_wallet, container, self.shell, rpc_endpoint)

     @allure.title("Parallel container creation and deletion")
+    @pytest.mark.exclude_sanity
     def test_container_creation_deletion_parallel(self, default_wallet: WalletInfo, rpc_endpoint: str):
         containers_count = 3
         wallet = default_wallet

View file

@@ -5,7 +5,9 @@ from frostfs_testlib.steps.cli.container import create_container, delete_container
 from frostfs_testlib.steps.cli.object import delete_object, put_object_to_random_node
 from frostfs_testlib.steps.node_management import get_netmap_snapshot
 from frostfs_testlib.steps.storage_policy import get_nodes_with_object
+from frostfs_testlib.storage.cluster import Cluster
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
+from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils.cli_utils import parse_netmap_output
@@ -169,7 +171,7 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check the object appearance"):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
             netmap = get_netmap_param(netmap)
-            node_address = resulting_copies[0].get_rpc_endpoint().split(":")[0]
+            node_address = self.cluster.node(resulting_copies[0]).get_interface(Interfaces.MGMT)

         with reporter.step(f"Check the node is selected from {placement_params['country']}"):
             assert (
                 placement_params["country"] == netmap[node_address]["country"]
@@ -219,7 +221,7 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check the object appearance"):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
             netmap = get_netmap_param(netmap)
-            node_address = resulting_copies[0].get_rpc_endpoint().split(":")[0]
+            node_address = self.cluster.node(resulting_copies[0]).get_interface(Interfaces.MGMT)

         with reporter.step(f"Check the node is selected from {placement_params['country'][0]}"):
             assert (
                 not (placement_params["country"][1] == netmap[node_address]["country"])
@@ -273,8 +275,8 @@ class TestPolicy(ClusterTestBase):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
             netmap = get_netmap_param(netmap)
         with reporter.step(f"Check two nodes are selected from any country"):
-            for node in resulting_copies:
-                node_address = resulting_copies[0].get_rpc_endpoint().split(":")[0]
+            for storage_node in resulting_copies:
+                node_address = self.cluster.node(storage_node).get_interface(Interfaces.MGMT)
                 assert (placement_params["country"][1] == netmap[node_address]["country"]) or (
                     not (placement_params["country"][1] == netmap[node_address]["country"])
                     and (placement_params["country"][0] == netmap[node_address]["country"])
@@ -326,8 +328,8 @@ class TestPolicy(ClusterTestBase):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
             netmap = get_netmap_param(netmap)
         with reporter.step(f"Check two nodes are selected from {placement_params['country']}"):
-            for node in resulting_copies:
-                node_address = node.get_rpc_endpoint().split(":")[0]
+            for storage_node in resulting_copies:
+                node_address = self.cluster.node(storage_node).get_interface(Interfaces.MGMT)
                 assert (
                     placement_params["country"] == netmap[node_address]["country"]
                 ), f"The node is selected from the wrong country. Got {netmap[node_address]['country']}"
@@ -635,8 +637,8 @@ class TestPolicy(ClusterTestBase):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
             netmap = get_netmap_param(netmap)
         with reporter.step(f"Check two nodes are selected from {' or '.join(placement_params['country'])}"):
-            for node in resulting_copies:
-                node_address = node.get_rpc_endpoint().split(":")[0]
+            for storage_node in resulting_copies:
+                node_address = self.cluster.node(storage_node).get_interface(Interfaces.MGMT)
                 assert (
                     (netmap[node_address]["country"] in placement_params["country"])
                     or (netmap[node_address]["country"] in placement_params["country"])
@@ -756,8 +758,8 @@ class TestPolicy(ClusterTestBase):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
             netmap = get_netmap_param(netmap)
         with reporter.step(f"Check two nodes are selected not from {placement_params['country'][0]}"):
-            for node in resulting_copies:
-                node_address = node.get_rpc_endpoint().split(":")[0]
+            for storage_node in resulting_copies:
+                node_address = self.cluster.node(storage_node).get_interface(Interfaces.MGMT)
                 assert not (placement_params["country"][0] == netmap[node_address]["country"]) or (
                     not (placement_params["country"][0] == netmap[node_address]["country"])
                     and not (placement_params["country_code"] == netmap[node_address]["country_code"])
@@ -809,8 +811,8 @@ class TestPolicy(ClusterTestBase):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
             netmap = get_netmap_param(netmap)
         with reporter.step(f"Check three nodes are selected from any country"):
-            for node in resulting_copies:
-                node_address = node.get_rpc_endpoint().split(":")[0]
+            for storage_node in resulting_copies:
+                node_address = self.cluster.node(storage_node).get_interface(Interfaces.MGMT)
                 assert (placement_params["country"][0] == netmap[node_address]["country"]) or (
                     not (placement_params["country"][0] == netmap[node_address]["country"])
                     and (placement_params["country"][1] == netmap[node_address]["country"])
@@ -864,8 +866,8 @@ class TestPolicy(ClusterTestBase):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
             netmap = get_netmap_param(netmap)
         list_of_location = []
-        for node in resulting_copies:
-            node_address = node.get_rpc_endpoint().split(":")[0]
+        for storage_node in resulting_copies:
+            node_address = self.cluster.node(storage_node).get_interface(Interfaces.MGMT)
             list_of_location.append(netmap[node_address]["location"])

         with reporter.step(f"Check two or three nodes are selected from Russia and from any other country"):

View file

@@ -9,11 +9,12 @@ from frostfs_testlib.steps.cli.container import delete_container
 from frostfs_testlib.steps.cli.object import delete_object, put_object_to_random_node
 from frostfs_testlib.steps.node_management import get_netmap_snapshot
 from frostfs_testlib.steps.storage_policy import get_nodes_with_object
-from frostfs_testlib.storage.cluster import Cluster
+from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
 from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
 from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
+from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeNetmapInfo
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing import parallel
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
@@ -34,23 +35,36 @@ class TestPolicyWithPrice(ClusterTestBase):
     def await_for_price_attribute_on_nodes(self):
         netmap = parse_netmap_output(get_netmap_snapshot(node=self.cluster.storage_nodes[0], shell=self.shell))
         netmap = get_netmap_param(netmap)
-        for node in self.cluster.storage_nodes:
-            node_address = node.get_rpc_endpoint().split(":")[0]
+        for node in self.cluster.cluster_nodes:
+            node_address = node.get_interface(Interfaces.MGMT)
             if netmap[node_address]["Price"] is None:
                 return False
         return True

+    @reporter.step("Set Price field on {cluster_node}")
+    def set_price_on_node(
+        self, cluster_node: ClusterNode, locode_price_map: dict[str, str], netmap: list[NodeNetmapInfo], config_manager: ConfigStateManager
+    ):
+        node_address = cluster_node.get_interface(Interfaces.MGMT)
+        node_netmap = [netmap_entry for netmap_entry in netmap if netmap_entry.node == node_address]
+        assert node_netmap, f"No node found with address {node_address}: \n{netmap}"
+        price = locode_price_map[node_netmap[0].un_locode]
+        config_manager.set_on_node(cluster_node, StorageNode, {"node:attribute_5": f"Price:{price}"})
+
     @pytest.fixture(scope="module")
     def fill_field_price(self, cluster: Cluster, cluster_state_controller_session: ClusterStateController):
-        prices = ["15", "10", "65", "55"]
+        locode_price_map = {
+            "RU MOW": "15",
+            "RU LED": "10",
+            "SE STO": "65",
+            "FI HEL": "55",
+        }
+        netmap = parse_netmap_output(get_netmap_snapshot(node=self.cluster.storage_nodes[0], shell=self.shell))
         config_manager = cluster_state_controller_session.manager(ConfigStateManager)
-        parallel(
-            config_manager.set_on_node,
-            cluster.cluster_nodes,
-            StorageNode,
-            itertools.cycle([{"node:attribute_5": f"Price:{price}"} for price in prices]),
-        )
+        parallel(self.set_price_on_node, cluster.cluster_nodes, locode_price_map, netmap, config_manager)
         cluster_state_controller_session.wait_after_storage_startup()
         self.tick_epoch()
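Replacing itertools.cycle over an ordered price list with an explicit UN/LOCODE-to-price map makes the assignment independent of node enumeration order, which is what the commit message ("Do not rely on node id during price configuration") is after. The same idea as a minimal sketch over hypothetical netmap entries:

locode_price_map = {"RU MOW": "15", "RU LED": "10", "SE STO": "65", "FI HEL": "55"}

# Hypothetical netmap entries; only the fields used by the lookup are shown.
netmap = [
    {"node": "10.0.0.1", "un_locode": "SE STO"},
    {"node": "10.0.0.2", "un_locode": "RU MOW"},
]

def price_for(node_address: str) -> str:
    entries = [e for e in netmap if e["node"] == node_address]
    assert entries, f"No netmap entry for {node_address}"
    return locode_price_map[entries[0]["un_locode"]]

print(price_for("10.0.0.1"))  # "65" regardless of the order nodes are listed in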
@@ -108,7 +122,7 @@ class TestPolicyWithPrice(ClusterTestBase):
         with reporter.step(f"Check the object appearance"):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
             netmap = get_netmap_param(netmap)
-            node_address = resulting_copies[0].get_rpc_endpoint().split(":")[0]
+            node_address = self.cluster.node(resulting_copies[0]).get_interface(Interfaces.MGMT)
         with reporter.step(f"Check the node is selected with price <= {placement_params['Price']}"):
             assert (
                 int(netmap[node_address]["Price"]) <= placement_params["Price"]
@@ -160,7 +174,7 @@ class TestPolicyWithPrice(ClusterTestBase):
         netmap = get_netmap_param(netmap)
         with reporter.step(f"Check the node is selected with price between 1 and 10"):
             for node in resulting_copies:
-                node_address = node.get_rpc_endpoint().split(":")[0]
+                node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
                 assert (
                     int(netmap[node_address]["Price"]) > placement_params["Price"][1]
                     and int(netmap[node_address]["Price"]) <= placement_params["Price"][0]
@@ -212,7 +226,7 @@ class TestPolicyWithPrice(ClusterTestBase):
         netmap = get_netmap_param(netmap)
         with reporter.step(f"Check two nodes are selected with max and min prices"):
             for node in resulting_copies:
-                node_address = node.get_rpc_endpoint().split(":")[0]
+                node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
                 assert (
                     int(netmap[node_address]["Price"]) > placement_params["Price"][1]
                     or int(netmap[node_address]["Price"]) < placement_params["Price"][0]
@@ -260,7 +274,7 @@ class TestPolicyWithPrice(ClusterTestBase):
         netmap = get_netmap_param(netmap)
         with reporter.step(f"Check two nodes are selected with price > {placement_params['Price']}"):
             for node in resulting_copies:
-                node_address = node.get_rpc_endpoint().split(":")[0]
+                node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
                 assert (
                     int(netmap[node_address]["Price"]) > placement_params["Price"]
                 ), f"The node is selected with the wrong price. Got {netmap[node_address]}"
@@ -311,7 +325,7 @@ class TestPolicyWithPrice(ClusterTestBase):
         netmap = get_netmap_param(netmap)
         with reporter.step(f"Check two nodes are selected not with country code '{placement_params['country_code']}'"):
             for node in resulting_copies:
-                node_address = node.get_rpc_endpoint().split(":")[0]
+                node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
                 assert (
                     not netmap[node_address]["country_code"] == placement_params["country_code"]
                     or not netmap[node_address]["country_code"] == placement_params["country_code"]
@@ -364,7 +378,7 @@ class TestPolicyWithPrice(ClusterTestBase):
         netmap = get_netmap_param(netmap)
         with reporter.step(f"Check all nodes are selected"):
             for node in resulting_copies:
-                node_address = node.get_rpc_endpoint().split(":")[0]
+                node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
                 assert (
                     netmap[node_address]["un_locode"] in placement_params["un_locode"]
                     or not netmap[node_address]["un_locode"] == placement_params["un_locode"][1]
@@ -420,7 +434,7 @@ class TestPolicyWithPrice(ClusterTestBase):
         netmap = get_netmap_param(netmap)
         with reporter.step(f"Check two nodes are selected with price < {placement_params['Price']}"):
             for node in resulting_copies:
-                node_address = node.get_rpc_endpoint().split(":")[0]
+                node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
                 assert (
                     int(netmap[node_address]["Price"]) < placement_params["Price"]
                 ), f"The node is selected with the wrong price. Got {netmap[node_address]}"
@@ -471,7 +485,7 @@ class TestPolicyWithPrice(ClusterTestBase):
         netmap = get_netmap_param(netmap)
         with reporter.step(f"Check three nodes are selected not from {placement_params['continent']}"):
             for node in resulting_copies:
-                node_address = node.get_rpc_endpoint().split(":")[0]
+                node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
                 assert (
                     int(netmap[node_address]["Price"]) < placement_params["Price"]
                     and not netmap[node_address]["continent"] == placement_params["continent"]
@@ -525,7 +539,7 @@ class TestPolicyWithPrice(ClusterTestBase):
         netmap = get_netmap_param(netmap)
         with reporter.step(f"Check all nodes are selected"):
             for node in resulting_copies:
-                node_address = node.get_rpc_endpoint().split(":")[0]
+                node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
                 assert (
                     (
                         int(netmap[node_address]["Price"]) > placement_params["Price"][1]
@@ -576,7 +590,7 @@ class TestPolicyWithPrice(ClusterTestBase):
         with reporter.step(f"Check the object appearance"):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
             netmap = get_netmap_param(netmap)
-            node_address = resulting_copies[0].get_rpc_endpoint().split(":")[0]
+            node_address = self.cluster.node(resulting_copies[0]).get_interface(Interfaces.MGMT)
         with reporter.step(f"Check the node is selected with price >= {placement_params['Price']}"):
             assert (
                 int(netmap[node_address]["Price"]) >= placement_params["Price"]
@@ -628,7 +642,7 @@ class TestPolicyWithPrice(ClusterTestBase):
         netmap = get_netmap_param(netmap)
         with reporter.step(f"Check all node are selected"):
             for node in resulting_copies:
-                node_address = node.get_rpc_endpoint().split(":")[0]
+                node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
                 assert (netmap[node_address]["country"] in placement_params["country"]) or (
                     int(netmap[node_address]["Price"]) >= placement_params["Price"]
                 ), f"The node is selected from the wrong country or with wrong price. Got {netmap[node_address]}"


@@ -1,79 +0,0 @@
-import datetime
-from time import sleep
-
-import allure
-import pytest
-from frostfs_testlib import reporter
-from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
-from frostfs_testlib.steps.cli.object import neo_go_query_height
-from frostfs_testlib.storage.controllers import ClusterStateController
-from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
-from frostfs_testlib.utils import datetime_utils
-
-
-@pytest.mark.order(20)
-@pytest.mark.failover
-class TestTime(ClusterTestBase):
-    @reporter.step("Neo-go should continue to release blocks")
-    def check_nodes_block(self, cluster_state_controller: ClusterStateController):
-        count_blocks = {}
-        with reporter.step("Get current block id"):
-            for cluster_node in self.cluster.cluster_nodes:
-                cluster_state_controller.get_node_date(cluster_node)
-                count_blocks[cluster_node] = neo_go_query_height(
-                    shell=cluster_node.host.get_shell(), endpoint=cluster_node.morph_chain.get_http_endpoint()
-                )["Latest block"]
-        with reporter.step("Wait for 3 blocks"):
-            sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 3)
-        with reporter.step("Current block id should be higher than before"):
-            for cluster_node in self.cluster.cluster_nodes:
-                shell = cluster_node.host.get_shell()
-                now_block = neo_go_query_height(shell=shell, endpoint=cluster_node.morph_chain.get_http_endpoint())[
-                    "Latest block"
-                ]
-                assert count_blocks[cluster_node] < now_block
-
-    @pytest.fixture()
-    def node_time_synchronizer(self, cluster_state_controller: ClusterStateController) -> None:
-        cluster_state_controller.set_sync_date_all_nodes(status="inactive")
-        yield
-        cluster_state_controller.set_sync_date_all_nodes(status="active")
-
-    @allure.title("Changing hardware and system time")
-    def test_system_time(self, cluster_state_controller: ClusterStateController, node_time_synchronizer: None):
-        cluster_nodes = self.cluster.cluster_nodes
-        timezone_utc = datetime.timezone.utc
-        node_1, node_2, node_3 = cluster_nodes[0:3]
-
-        with reporter.step("On node 1, move the system time forward by 5 days"):
-            cluster_state_controller.change_node_date(
-                node_1, (datetime.datetime.now(timezone_utc) + datetime.timedelta(days=5))
-            )
-        self.check_nodes_block(cluster_state_controller)
-
-        with reporter.step("On node 2, move the system time back 5 days."):
-            cluster_state_controller.change_node_date(
-                node_2, (datetime.datetime.now(timezone_utc) - datetime.timedelta(days=5))
-            )
-        self.check_nodes_block(cluster_state_controller)
-
-        with reporter.step("On node 3, move the system time forward by 10 days"):
-            cluster_state_controller.change_node_date(
-                node_3, (datetime.datetime.now(timezone_utc) + datetime.timedelta(days=10))
-            )
-        self.check_nodes_block(cluster_state_controller)
-
-        with reporter.step("Return the time on all nodes to the current one"):
-            for cluster_node in self.cluster.cluster_nodes:
-                cluster_state_controller.restore_node_date(cluster_node)
-        self.check_nodes_block(cluster_state_controller)
-
-        with reporter.step("Reboot all nodes"):
-            cluster_state_controller.shutdown_cluster(mode="soft")
-            cluster_state_controller.start_stopped_hosts()
-        self.check_nodes_block(cluster_state_controller)


@@ -6,9 +6,9 @@ from time import sleep
 import allure
 import pytest
 from frostfs_testlib import reporter
+from frostfs_testlib.clients.s3 import BucketContainerResolver, S3ClientWrapper, VersioningStatus
 from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
-from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
-from frostfs_testlib.s3.interfaces import BucketContainerResolver
+from frostfs_testlib.steps import s3_helper
 from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo, create_container
 from frostfs_testlib.steps.cli.object import get_object, put_object_to_random_node
 from frostfs_testlib.steps.node_management import (
@@ -19,8 +19,6 @@ from frostfs_testlib.steps.node_management import (
     remove_nodes_from_map_morph,
     wait_for_node_to_be_ready,
 )
-from frostfs_testlib.steps.s3 import s3_helper
-from frostfs_testlib.steps.s3.s3_helper import search_nodes_with_bucket
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode
 from frostfs_testlib.storage.controllers import ClusterStateController, ShardsWatcher
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
@@ -137,7 +135,7 @@ class TestFailoverStorage(ClusterTestBase):
         put_object = s3_client.put_object(bucket, file_path)
         s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])

-        node_bucket = search_nodes_with_bucket(
+        node_bucket = s3_helper.search_nodes_with_bucket(
             cluster=self.cluster,
             bucket_name=bucket,
             wallet=default_wallet,


@@ -18,7 +18,7 @@ from frostfs_testlib.testing.parallel import parallel
 from frostfs_testlib.utils.failover_utils import wait_object_replication
 from frostfs_testlib.utils.file_utils import generate_file, get_file_hash

-from ...helpers.container_request import PUBLIC_WITH_POLICY, REP_2_2_2_PUBLIC, requires_container
+from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, PUBLIC_WITH_POLICY, REP_2_2_2_PUBLIC, ContainerRequest, requires_container

 logger = logging.getLogger("NeoLogger")
 STORAGE_NODE_COMMUNICATION_PORT = "8080"
@@ -193,7 +193,7 @@ class TestFailoverNetwork(ClusterTestBase):
         self.tick_epochs(1, alive_node=nodes_without_an_object[0].storage_node, wait_block=2)
         with reporter.step("Get object for target nodes to data interfaces, expect false"):
-            with pytest.raises(RuntimeError, match="can't create API client: can't init SDK client: gRPC dial: context deadline exceeded"):
+            with pytest.raises(RuntimeError, match="can't create API client: can't init SDK client: context (deadline exceeded|canceled)"):
                 get_object(
                     wallet=default_wallet,
                     cid=storage_object.cid,
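The relaxed pattern above works because pytest.raises treats match as a regular-expression search over the exception text, so the alternation accepts either wording of the gRPC failure. A tiny self-contained illustration (the messages are made up):

import pytest

for message in ["init SDK client: context deadline exceeded", "init SDK client: context canceled"]:
    # re.search is applied to str(exception), so either variant satisfies the check.
    with pytest.raises(RuntimeError, match="context (deadline exceeded|canceled)"):
        raise RuntimeError(message)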
@@ -309,6 +309,11 @@ class TestFailoverNetwork(ClusterTestBase):
         block_interface: Interfaces,
         other_interface: Interfaces,
     ):
+        endpoint_id_map = {
+            Interfaces.DATA_O: 0,
+            Interfaces.DATA_1: 1,
+        }
+        endpoint_id = endpoint_id_map[other_interface]
         cluster_nodes = self.cluster.cluster_nodes
         with reporter.step(f"Block {block_interface.value} interfaces"):
             cluster_state_controller.down_interface(cluster_nodes, block_interface.value)
@@ -320,28 +325,27 @@ class TestFailoverNetwork(ClusterTestBase):
             cid = create_container(
                 wallet=default_wallet,
                 shell=self.shell,
-                endpoint=f"{cluster_nodes[0].get_data_interface(other_interface.value)[0]}:8080",
+                endpoint=cluster_nodes[0].storage_node.get_all_rpc_endpoint()[endpoint_id],
                 rule="REP 4 CBF 1",
             )

         with reporter.step("Put object"):
             file_path = generate_file(simple_object_size.value)
             oid = put_object(
                 wallet=default_wallet,
                 path=file_path,
                 cid=cid,
                 shell=self.shell,
-                endpoint=f"{cluster_nodes[0].get_data_interface(other_interface.value)[0]}:8080",
+                endpoint=cluster_nodes[0].storage_node.get_all_rpc_endpoint()[endpoint_id],
             )

         with reporter.step("Get object"):
-            file_get_path = get_object(
+            get_object(
                 wallet=default_wallet,
                 cid=cid,
                 oid=oid,
                 shell=self.shell,
-                endpoint=f"{cluster_nodes[0].get_data_interface(other_interface.value)[0]}:8080",
+                endpoint=cluster_nodes[0].storage_node.get_all_rpc_endpoint()[endpoint_id],
             )

         with reporter.step("Restore interfaces all nodes"):
@@ -350,6 +354,11 @@ class TestFailoverNetwork(ClusterTestBase):
     @pytest.mark.interfaces
     @pytest.mark.failover_baremetal
+    @pytest.mark.parametrize(
+        "container_request",
+        [ContainerRequest(f"REP %NODE_COUNT% IN X CBF 1 SELECT %NODE_COUNT% FROM * AS X", APE_EVERYONE_ALLOW_ALL)],
+        indirect=True,
+    )
     @pytest.mark.parametrize("interface", [Interfaces.INTERNAL_0, Interfaces.INTERNAL_1])
     @allure.title("Down internal interfaces to all nodes(interface={interface})")
     def test_down_internal_interface(
@@ -360,6 +369,7 @@ class TestFailoverNetwork(ClusterTestBase):
         simple_object_size: ObjectSize,
         restore_down_interfaces: None,
         interface: Interfaces,
+        container: str,
     ):
         cluster_nodes = self.cluster.cluster_nodes
         latest_block = {}
@@ -376,29 +386,21 @@ class TestFailoverNetwork(ClusterTestBase):
         with reporter.step("Tick 1 epoch and wait 2 block for sync all nodes"):
             self.tick_epochs(1, alive_node=cluster_nodes[0].storage_node, wait_block=2)

-        with reporter.step("Create container"):
-            cid = create_container(
-                wallet=default_wallet,
-                shell=self.shell,
-                endpoint=self.cluster.default_rpc_endpoint,
-                rule="REP 4 CBF 1",
-            )
-
         with reporter.step(f"Put object, after down {interface}"):
             file_path = generate_file(simple_object_size.value)
             oid = put_object(
                 wallet=default_wallet,
                 path=file_path,
-                cid=cid,
+                cid=container,
                 shell=self.shell,
                 endpoint=self.cluster.default_rpc_endpoint,
             )

         with reporter.step("Get object"):
-            file_get_path = get_object(
+            get_object(
                 wallet=default_wallet,
-                cid=cid,
+                cid=container,
                 oid=oid,
                 shell=self.shell,
                 endpoint=self.cluster.default_rpc_endpoint,
@@ -411,7 +413,7 @@ class TestFailoverNetwork(ClusterTestBase):
                 now_block[cluster_node] = neo_go_query_height(
                     shell=cluster_node.host.get_shell(), endpoint=cluster_node.morph_chain.get_http_endpoint()
                 )
-        with reporter.step(f"Compare block"):
+        with reporter.step("Compare block"):
             for cluster_node, items in now_block.items():
                 with reporter.step(
                     f"Node - {cluster_node.host_ip}, old block - {latest_block[cluster_node]['Latest block']}, "


@@ -36,7 +36,7 @@ from frostfs_testlib.steps.storage_policy import get_nodes_with_object
 from frostfs_testlib.storage.cluster import ClusterNode, StorageNode
 from frostfs_testlib.storage.controllers import ClusterStateController
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
-from frostfs_testlib.storage.dataclasses.storage_object_info import NodeStatus
+from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeStatus
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils import string_utils
@@ -343,9 +343,11 @@ class TestMaintenanceMode(ClusterTestBase):
     def check_node_status(self, expected_status: NodeStatus, node_under_test: ClusterNode, frostfs_cli: FrostfsCli, rpc_endpoint: str):
         netmap = frostfs_cli.netmap.snapshot(rpc_endpoint).stdout
         all_snapshots = NetmapParser.snapshot_all_nodes(netmap)
-        node_snapshot = [snapshot for snapshot in all_snapshots if node_under_test.host_ip == snapshot.node]
+        node_snapshot = [snapshot for snapshot in all_snapshots if node_under_test.get_interface(Interfaces.MGMT) == snapshot.node]
         if expected_status == NodeStatus.OFFLINE and not node_snapshot:
-            assert node_under_test.host_ip not in netmap, f"{node_under_test} status should be {expected_status}. See netmap:\n{netmap}"
+            assert (
+                node_under_test.get_interface(Interfaces.MGMT) not in netmap
+            ), f"{node_under_test} status should be {expected_status}. See netmap:\n{netmap}"
             return

         assert node_snapshot, f"{node_under_test} status should be {expected_status}, but was not in netmap. See netmap:\n{netmap}"
@@ -422,7 +424,6 @@ class TestMaintenanceMode(ClusterTestBase):
         with pytest.raises(RuntimeError, match=node_under_maintenance_error):
             put_object(default_wallet, file_path, cid, self.shell, endpoint)

-    @pytest.mark.sanity
     @allure.title("MAINTENANCE and OFFLINE mode transitions")
     def test_mode_transitions(
         self,


@@ -14,10 +14,11 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.parallel import parallel
 from frostfs_testlib.utils.file_utils import TestFile, generate_file

-from ...helpers.container_request import PUBLIC_WITH_POLICY, ContainerRequest, requires_container
+from ...helpers.container_request import PUBLIC_WITH_POLICY, REP_2_1_4_PUBLIC, ContainerRequest, requires_container
 from ...helpers.utility import are_numbers_similar


+@pytest.mark.order(-5)
 @pytest.mark.nightly
 @pytest.mark.metrics
 class TestContainerMetrics(ClusterTestBase):
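The @pytest.mark.order(-5) mark here (and the -6 through -11 marks added to the other metrics suites below) relies on the pytest-order plugin, where negative ordinals are placed counting from the end of the collection, mirroring Python's negative list indexing. That pins the metrics suites to a fixed slot relative to one another regardless of collection order. A minimal sketch, assuming pytest-order is installed:

import pytest

@pytest.mark.order(-2)
def test_runs_second_to_last():
    assert True

@pytest.mark.order(-1)
def test_runs_last():
    assert True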
@@ -133,7 +134,7 @@ class TestContainerMetrics(ClusterTestBase):
     @allure.title("Container size metrics put {objects_count} objects (obj_size={object_size})")
     @pytest.mark.parametrize("objects_count", [5, 10, 20])
-    @requires_container
+    @requires_container(REP_2_1_4_PUBLIC)
     def test_container_size_metrics_more_objects(
         self, object_size: ObjectSize, default_wallet: WalletInfo, objects_count: int, container: str
     ):
@@ -161,13 +162,22 @@ class TestContainerMetrics(ClusterTestBase):
             tombstones_size += int(tombstone["header"]["payloadLength"])

         with reporter.step(f"Check container size metrics, 'should be positive in all nodes'"):
-            futures = parallel(
-                get_metrics_value, self.cluster.cluster_nodes, command="frostfs_node_engine_container_size_bytes", cid=container
-            )
+            with reporter.step("Search container nodes"):
+                container_nodes = search_nodes_with_container(
+                    wallet=default_wallet,
+                    cid=container,
+                    shell=self.shell,
+                    endpoint=self.cluster.default_rpc_endpoint,
+                    cluster=self.cluster,
+                )
+            with reporter.step(f"Get metrics value from container nodes"):
+                futures = parallel(get_metrics_value, container_nodes, command="frostfs_node_engine_container_size_bytes", cid=container)
             metrics_value_nodes = [future.result() for future in futures]
             for act_metric in metrics_value_nodes:
                 assert act_metric >= 0, "Metrics value is negative"
-            assert sum(metrics_value_nodes) // len(self.cluster.cluster_nodes) == tombstones_size, "tomstone size of objects not correct"
+
+        with reporter.step(f"Check container size metrics for tombstone"):
+            assert sum(metrics_value_nodes) // len(container_nodes) == tombstones_size, "tombstone size of objects not correct"

     @allure.title("Container metrics (policy={container_request})")
     @pytest.mark.parametrize(
@@ -210,7 +220,17 @@ class TestContainerMetrics(ClusterTestBase):
             self.tick_epoch()
             wait_for_container_deletion(default_wallet, container, shell=self.shell, endpoint=cluster.default_rpc_endpoint)

-        with reporter.step(f"Check metrics value in each nodes, should not be show any result"):
+        with reporter.step(f"Check metric {metric_name} in each node, should not show any result"):
             futures = parallel(self.get_metrics_search_by_greps_parallel, cluster.cluster_nodes, command=metric_name, cid=container)
             metrics_results = [future.result() for future in futures if future.result() is not None]
             assert len(metrics_results) == 0, f"Metrics value is not empty in Prometheus, actual value in nodes: {metrics_results}"

+        with reporter.step("Check metric 'frostfs_node_engine_container_size_bytes' in each node, should not show any result"):
+            futures = parallel(
+                self.get_metrics_search_by_greps_parallel,
+                cluster.cluster_nodes,
+                command="frostfs_node_engine_container_size_bytes",
+                cid=container,
+            )
+            metrics_results = [future.result() for future in futures if future.result() is not None]
+            assert len(metrics_results) == 0, f"Metrics value is not empty, actual value in nodes: {metrics_results}"


@@ -0,0 +1,40 @@
+import allure
+import pytest
+from frostfs_testlib import reporter
+from frostfs_testlib.steps.metrics import get_metrics_value
+from frostfs_testlib.storage.cluster import Cluster, ClusterNode
+from frostfs_testlib.testing import parallel
+from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
+
+
+@pytest.mark.order(-7)
+@pytest.mark.nightly
+@pytest.mark.metrics
+class TestEpochMetrics(ClusterTestBase):
+    @reporter.step("Get metrics value from node: {node}")
+    def get_metrics_search_by_greps_parallel(self, node: ClusterNode, **greps):
+        try:
+            return get_metrics_value(node, parse_from_command=True, **greps)
+        except Exception:
+            return None
+
+    @allure.title("Check changes in metric frostfs_node_ir_epoch value")
+    def test_check_increase_epoch_metric(self, cluster: Cluster):
+        metric_name = "frostfs_node_ir_epoch"
+        with reporter.step(f"Get current value of metric {metric_name} from each node"):
+            futures = parallel(self.get_metrics_search_by_greps_parallel, cluster.cluster_nodes, command=metric_name)
+            metrics_results = [future.result() for future in futures if future.result() is not None]
+
+        with reporter.step("Check that the metric values are the same in all nodes"):
+            assert len(set(metrics_results)) == 1, f"Metric {metric_name} values aren't same in all nodes"
+            assert len(metrics_results) == len(cluster.cluster_nodes), "Metrics are not available in some nodes"
+
+        with reporter.step("Tick epoch"):
+            self.tick_epoch(wait_block=2)
+
+        with reporter.step("Check that metric value increases"):
+            futures = parallel(self.get_metrics_search_by_greps_parallel, cluster.cluster_nodes, command=metric_name)
+            new_metrics_results = [future.result() for future in futures if future.result() is not None]
+            assert len(set(new_metrics_results)) == 1, f"Metric {metric_name} values aren't same in all nodes"
+            assert new_metrics_results[0] > metrics_results[0], "Metric value didn't increase"
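The new suite fans the metric query out to every node and then compares the per-node results. The same gather-and-compare pattern, sketched with only the standard library (node names and the reader function are stand-ins, not testlib APIs):

from concurrent.futures import ThreadPoolExecutor

def read_epoch_metric(node: str) -> int:
    return 42  # stand-in for get_metrics_value(node, command="frostfs_node_ir_epoch")

nodes = ["node1", "node2", "node3"]
with ThreadPoolExecutor() as pool:
    results = list(pool.map(read_epoch_metric, nodes))

assert len(results) == len(nodes), "metric missing on some nodes"
assert len(set(results)) == 1, "epoch metric values differ between nodes"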


@@ -16,6 +16,7 @@ from frostfs_testlib.utils.file_utils import generate_file
 from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container


+@pytest.mark.order(-9)
 @pytest.mark.nightly
 @pytest.mark.metrics
 class TestGarbageCollectorMetrics(ClusterTestBase):


@@ -18,6 +18,7 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils.file_utils import generate_file


+@pytest.mark.order(-6)
 @pytest.mark.nightly
 @pytest.mark.metrics
 class TestGRPCMetrics(ClusterTestBase):
@@ -30,6 +31,7 @@ class TestGRPCMetrics(ClusterTestBase):
     @allure.title("GRPC metrics container operations")
     def test_grpc_metrics_container_operations(self, default_wallet: WalletInfo, cluster: Cluster):
+        operations_count = 10
         placement_policy = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"

         with reporter.step("Select random node"):
@@ -55,12 +57,14 @@ class TestGRPCMetrics(ClusterTestBase):
             metrics_counter_get = get_metrics_value(node, command="grpc_server_handled_total", service="ContainerService", method="Get")

         with reporter.step(f"Get container"):
-            get_container(default_wallet, cid, self.shell, node.storage_node.get_rpc_endpoint())
+            for _ in range(operations_count):
+                get_container(default_wallet, cid, self.shell, node.storage_node.get_rpc_endpoint())

-        with reporter.step(f"Check gRPC metrics method=Get, 'the counter should increase by 1'"):
-            metrics_counter_get += 1
+        with reporter.step(f"Check gRPC metrics method=Get, 'the counter should increase by {operations_count}'"):
+            metrics_counter_get += operations_count
             check_metrics_counter(
                 [node],
+                operator=">=",
                 counter_exp=metrics_counter_get,
                 command="grpc_server_handled_total",
                 service="ContainerService",
@@ -71,12 +75,14 @@ class TestGRPCMetrics(ClusterTestBase):
             metrics_counter_list = get_metrics_value(node, command="grpc_server_handled_total", service="ContainerService", method="List")

         with reporter.step(f"Get container list"):
-            list_containers(default_wallet, self.shell, node.storage_node.get_rpc_endpoint())
+            for _ in range(operations_count):
+                list_containers(default_wallet, self.shell, node.storage_node.get_rpc_endpoint())

-        with reporter.step(f"Check gRPC metrics method=List, 'the counter should increase by 1'"):
-            metrics_counter_list += 1
+        with reporter.step(f"Check gRPC metrics method=List, 'the counter should increase by {operations_count}'"):
+            metrics_counter_list += operations_count
             check_metrics_counter(
                 [node],
+                operator=">=",
                 counter_exp=metrics_counter_list,
                 command="grpc_server_handled_total",
                 service="ContainerService",
@@ -87,6 +93,7 @@ class TestGRPCMetrics(ClusterTestBase):
     def test_grpc_metrics_object_operations(
         self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, container: str, disable_policer
     ):
+        operations_count = 10
         file_path = generate_file(simple_object_size.value)

         with reporter.step("Select random node"):
@@ -96,12 +103,14 @@ class TestGRPCMetrics(ClusterTestBase):
             metrics_counter_put = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Put")

         with reporter.step("Put object to selected node"):
-            oid = put_object(default_wallet, file_path, container, self.shell, node.storage_node.get_rpc_endpoint())
+            for _ in range(operations_count):
+                oid = put_object(default_wallet, file_path, container, self.shell, node.storage_node.get_rpc_endpoint())

-        with reporter.step(f"Check gRPC metrics method 'Put', 'the counter should increase by 1'"):
-            metrics_counter_put += 1
+        with reporter.step(f"Check gRPC metrics method 'Put', 'the counter should increase by {operations_count}'"):
+            metrics_counter_put += operations_count
             check_metrics_counter(
                 [node],
+                operator=">=",
                 counter_exp=metrics_counter_put,
                 command="grpc_server_handled_total",
                 service="ObjectService",
@@ -112,12 +121,14 @@ class TestGRPCMetrics(ClusterTestBase):
             metrics_counter_get = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Get")

         with reporter.step(f"Get object"):
-            get_object(default_wallet, container, oid, self.shell, node.storage_node.get_rpc_endpoint())
+            for _ in range(operations_count):
+                get_object(default_wallet, container, oid, self.shell, node.storage_node.get_rpc_endpoint())

-        with reporter.step(f"Check gRPC metrics method=Get, 'the counter should increase by 1'"):
-            metrics_counter_get += 1
+        with reporter.step(f"Check gRPC metrics method=Get, 'the counter should increase by {operations_count}'"):
+            metrics_counter_get += operations_count
             check_metrics_counter(
                 [node],
+                operator=">=",
                 counter_exp=metrics_counter_get,
                 command="grpc_server_handled_total",
                 service="ObjectService",
@@ -128,12 +139,14 @@ class TestGRPCMetrics(ClusterTestBase):
             metrics_counter_search = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Search")

         with reporter.step(f"Search object"):
-            search_object(default_wallet, container, self.shell, node.storage_node.get_rpc_endpoint())
+            for _ in range(operations_count):
+                search_object(default_wallet, container, self.shell, node.storage_node.get_rpc_endpoint())

-        with reporter.step(f"Check gRPC metrics method=Search, 'the counter should increase by 1'"):
-            metrics_counter_search += 1
+        with reporter.step(f"Check gRPC metrics method=Search, 'the counter should increase by {operations_count}'"):
+            metrics_counter_search += operations_count
             check_metrics_counter(
                 [node],
+                operator=">=",
                 counter_exp=metrics_counter_search,
                 command="grpc_server_handled_total",
                 service="ObjectService",
@@ -144,12 +157,14 @@ class TestGRPCMetrics(ClusterTestBase):
             metrics_counter_head = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Head")

         with reporter.step(f"Head object"):
-            head_object(default_wallet, container, oid, self.shell, node.storage_node.get_rpc_endpoint())
+            for _ in range(operations_count):
+                head_object(default_wallet, container, oid, self.shell, node.storage_node.get_rpc_endpoint())

-        with reporter.step(f"Check gRPC metrics method=Head, 'the counter should increase by 1'"):
-            metrics_counter_head += 1
+        with reporter.step(f"Check gRPC metrics method=Head, 'the counter should increase by {operations_count}'"):
+            metrics_counter_head += operations_count
             check_metrics_counter(
                 [node],
+                operator=">=",
                 counter_exp=metrics_counter_head,
                 command="grpc_server_handled_total",
                 service="ObjectService",


@@ -14,6 +14,7 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.test_control import wait_for_success


+@pytest.mark.order(-10)
 @pytest.mark.nightly
 @pytest.mark.metrics
 class TestLogsMetrics(ClusterTestBase):
@@ -34,7 +35,9 @@ class TestLogsMetrics(ClusterTestBase):
         current_time = datetime.now(timezone.utc)
         counter_metrics = get_metrics_value(cluster_node, **metrics_greps)
         counter_logs = self.get_count_logs_by_level(cluster_node, metrics_greps.get("level"), restart_time, current_time, log_priority)
-        assert counter_logs == counter_metrics, f"counter_logs: {counter_logs}, counter_metrics: {counter_metrics} in node: {cluster_node}"
+        assert counter_logs == pytest.approx(
+            counter_metrics, rel=0.02
+        ), f"counter_logs: {counter_logs}, counter_metrics: {counter_metrics} in node: {cluster_node}"

     @staticmethod
     def get_count_logs_by_level(cluster_node: ClusterNode, log_level: str, after_time: datetime, until_time: datetime, log_priority: str):
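pytest.approx with rel=0.02 above accepts up to roughly 2% relative difference between the journal count and the metric value, which absorbs the drift from sampling the two sources at slightly different moments. For example:

import pytest

counter_metrics = 1000
counter_logs = 985  # read a moment apart, so a small drift is expected

# |985 - 1000| = 15 <= 0.02 * 1000 = 20, so the check passes.
assert counter_logs == pytest.approx(counter_metrics, rel=0.02)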


@@ -17,6 +17,7 @@ from frostfs_testlib.utils.file_utils import TestFile
 from ...helpers.container_request import PUBLIC_WITH_POLICY, ContainerRequest, requires_container


+@pytest.mark.order(-11)
 @pytest.mark.nightly
 @pytest.mark.metrics
 class TestObjectMetrics(ClusterTestBase):
@@ -88,6 +89,7 @@ class TestObjectMetrics(ClusterTestBase):
             objects_metric_counter += metric_step
             check_metrics_counter(
                 container_nodes,
+                operator=">=",
                 counter_exp=objects_metric_counter,
                 command="frostfs_node_engine_objects_total",
                 type="user",
@@ -107,6 +109,7 @@ class TestObjectMetrics(ClusterTestBase):
             objects_metric_counter -= metric_step
             check_metrics_counter(
                 container_nodes,
+                operator=">=",
                 counter_exp=objects_metric_counter,
                 command="frostfs_node_engine_objects_total",
                 type="user",
@@ -135,6 +138,7 @@ class TestObjectMetrics(ClusterTestBase):
             objects_metric_counter += metric_step
             check_metrics_counter(
                 container_nodes,
+                operator=">=",
                 counter_exp=objects_metric_counter,
                 command="frostfs_node_engine_objects_total",
                 type="user",
@@ -151,6 +155,7 @@ class TestObjectMetrics(ClusterTestBase):
             self.tick_epochs(epochs_to_tick=2)
             check_metrics_counter(
                 container_nodes,
+                operator=">=",
                 counter_exp=objects_metric_counter,
                 command="frostfs_node_engine_objects_total",
                 type="user",
@@ -163,6 +168,7 @@ class TestObjectMetrics(ClusterTestBase):
             objects_metric_counter -= metric_step
             check_metrics_counter(
                 container_nodes,
+                operator=">=",
                 counter_exp=objects_metric_counter,
                 command="frostfs_node_engine_objects_total",
                 type="user",
@@ -190,6 +196,7 @@ class TestObjectMetrics(ClusterTestBase):
             objects_metric_counter += metric_step
             check_metrics_counter(
                 container_nodes,
+                operator=">=",
                 counter_exp=objects_metric_counter,
                 command="frostfs_node_engine_objects_total",
                 type="user",
@@ -209,6 +216,7 @@ class TestObjectMetrics(ClusterTestBase):
             objects_metric_counter -= metric_step
             check_metrics_counter(
                 container_nodes,
+                operator=">=",
                 counter_exp=objects_metric_counter,
                 command="frostfs_node_engine_objects_total",
                 type="user",


@@ -19,6 +19,7 @@ from frostfs_testlib.utils.file_utils import generate_file
 from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container


+@pytest.mark.order(-8)
 @pytest.mark.nightly
 @pytest.mark.metrics
 class TestShardMetrics(ClusterTestBase):
@@ -87,6 +88,7 @@ class TestShardMetrics(ClusterTestBase):
     @allure.title("Metric for shard mode")
     def test_shard_metrics_set_mode(self, two_shards_and_node: tuple[str, str, ClusterNode]):
         metrics_counter = 1
+        metric_name_blobstore = "frostfs_node_blobstore_mode"
         shard1, shard2, node = two_shards_and_node

         with reporter.step("Shard1 set to mode 'read-only'"):
@@ -101,6 +103,15 @@ class TestShardMetrics(ClusterTestBase):
                 shard_id=shard1,
             )

+        with reporter.step(f"Check {metric_name_blobstore} metrics, 'the mode will change to 'READ_ONLY'"):
+            check_metrics_counter(
+                [node],
+                counter_exp=metrics_counter,
+                command=metric_name_blobstore,
+                mode="READ_ONLY",
+                shard_id=shard1,
+            )
+
         with reporter.step("Shard2 set to mode 'degraded-read-only'"):
             node_shard_set_mode(node.storage_node, shard2, "degraded-read-only")
@@ -113,6 +124,15 @@ class TestShardMetrics(ClusterTestBase):
                 shard_id=shard2,
             )

+        with reporter.step(f"Check {metric_name_blobstore} metrics, 'the mode will stay 'READ_ONLY'"):
+            check_metrics_counter(
+                [node],
+                counter_exp=metrics_counter,
+                command=metric_name_blobstore,
+                mode="READ_ONLY",
+                shard_id=shard2,
+            )
+
         with reporter.step("Both shards set to mode 'read-write'"):
             for shard in [shard1, shard2]:
                 node_shard_set_mode(node.storage_node, shard, "read-write")
@@ -127,6 +147,16 @@ class TestShardMetrics(ClusterTestBase):
                 shard_id=shard,
             )

+        with reporter.step(f"Check {metric_name_blobstore} metrics, 'the mode will change to 'READ_WRITE'"):
+            for shard in [shard1, shard2]:
+                check_metrics_counter(
+                    [node],
+                    counter_exp=metrics_counter,
+                    command=metric_name_blobstore,
+                    mode="READ_WRITE",
+                    shard_id=shard,
+                )
+
     @allure.title("Metric for error count on shard")
     @requires_container(PUBLIC_WITH_POLICY("REP 1 CBF 1", short_name="REP 1"))
     def test_shard_metrics_error_count(


@@ -314,6 +314,7 @@ class TestObjectApi(ClusterTestBase):
         assert sorted(expected_oids) == sorted(result)

     @allure.title("Search objects with removed items (obj_size={object_size})")
+    @pytest.mark.exclude_sanity
     def test_object_search_should_return_tombstone_items(
         self,
         default_wallet: WalletInfo,


@@ -2,6 +2,7 @@ import allure
 import pytest
 from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
+from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.container import (
     REP_2_FOR_3_NODES_PLACEMENT_RULE,
@@ -15,6 +16,8 @@ from frostfs_testlib.storage.cluster import Cluster
 from frostfs_testlib.storage.dataclasses import ape
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.storage.grpc_operations.client_wrappers import CliClientWrapper
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.test_control import expect_not_raises
 from pytest import FixtureRequest
@@ -46,6 +49,11 @@ def bearer_token(frostfs_cli: FrostfsCli, temp_directory: str, user_container: S
     return create_bearer_token(frostfs_cli, temp_directory, user_container.get_id(), rule, cluster.default_rpc_endpoint)


+@pytest.fixture(scope="session")
+def grpc_client_with_other_wallet(client_shell: Shell, other_wallet: WalletInfo) -> GrpcClientWrapper:
+    return CliClientWrapper(FrostfsCli(client_shell, FROSTFS_CLI_EXEC, other_wallet.config_path))
+
+
 @pytest.fixture()
 def storage_objects(
     user_container: StorageContainer,
@@ -126,6 +134,8 @@ class TestObjectApiWithBearerToken(ClusterTestBase):
             bearer_token,
         )

+    # TODO: Without PATCH operation,
+    # since it requires specific permissions that do not apply when testing all operations at once
     @allure.title("Wildcard APE rule contains all permissions (obj_size={object_size})")
     def test_ape_wildcard_contains_all_rules(
         self,
@@ -134,5 +144,27 @@ class TestObjectApiWithBearerToken(ClusterTestBase):
         bearer_token: str,
     ):
         obj = storage_objects.pop()
-        with reporter.step(f"Assert all operations available with object"):
+        with reporter.step("Assert all operations available with object"):
             assert_full_access_to_container(other_wallet, obj.cid, obj.oid, obj.file_path, self.shell, self.cluster, bearer_token)
+
+    @allure.title("Wildcard APE rule contains PATCH permission (obj_size={object_size})")
+    def test_ape_wildcard_contains_patch_rule(
+        self,
+        grpc_client_with_other_wallet: GrpcClientWrapper,
+        storage_objects: list[StorageObjectInfo],
+        bearer_token: str,
+    ):
+        obj = storage_objects.pop()
+
+        with reporter.step("Verify patch is available"):
+            patched_oid = grpc_client_with_other_wallet.object.patch(
+                obj.cid,
+                obj.oid,
+                self.cluster.default_rpc_endpoint,
+                ranges=["99:88"],
+                payloads=[obj.file_path],
+                new_attrs="test-attribute=100",
+                bearer=bearer_token,
+                timeout="200s",
+            )
+            assert patched_oid != obj.oid, "OID of patched object must be different from original one"
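The new PATCH case passes ranges=["99:88"] together with a payload file. Assuming the range string means offset:length and the replacement bytes come from the payload (an assumed reading of the test's arguments, not the SDK's implementation), the byte-splice it exercises looks like:

def apply_patch(data: bytes, range_spec: str, payload: bytes) -> bytes:
    # "offset:length" is an assumed interpretation of the range string above.
    offset, length = (int(part) for part in range_spec.split(":"))
    return data[:offset] + payload + data[offset + length:]

original = bytes(200)
patched = apply_patch(original, "99:88", b"\x01" * 88)
assert patched != original and len(patched) == len(original)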


@@ -7,7 +7,7 @@ from frostfs_testlib.storage.constants import PlacementRule
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
-from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
@@ -733,9 +733,11 @@ class TestObjectApiPatch(ClusterTestBase):
     @allure.title("[NEGATIVE] Patch cannot be applied to part of complex object (policy={placement_policy})")
     @pytest.mark.parametrize("placement_policy", ["rep"], indirect=True)
     @pytest.mark.parametrize("object_size", ["complex"], indirect=True)
-    def test_patch_part_of_complex_object_rep(self, grpc_client: GrpcClientWrapper, container: str, original_object: str):
+    def test_patch_part_of_complex_object_rep(
+        self, grpc_client: GrpcClientWrapper, remote_grpc_client: GrpcClientWrapper, container: str, original_object: str
+    ):
         with reporter.step("Get parts of complex object"):
-            parts = grpc_client.object.parts(container, original_object, self.cluster.cluster_nodes[0])
+            parts = remote_grpc_client.object.parts(container, original_object, self.cluster.cluster_nodes[0])
             assert parts, f"Expected list of OIDs of object parts: {parts}"
             part_oid = parts[0]
@@ -752,9 +754,11 @@ class TestObjectApiPatch(ClusterTestBase):
     @allure.title("[NEGATIVE] Patch cannot be applied to EC chunk (object_size={object_size}, policy={placement_policy})")
     @pytest.mark.parametrize("placement_policy", ["ec"], indirect=True)
-    def test_patch_ec_chunk(self, grpc_client: GrpcClientWrapper, container: str, original_object: str):
+    def test_patch_ec_chunk(
+        self, grpc_client: GrpcClientWrapper, remote_grpc_client: GrpcClientWrapper, container: str, original_object: str
+    ):
         with reporter.step("Get chunks of object"):
-            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, original_object)
+            chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, original_object)
             assert chunks, f"Expected object chunks, but they are not there: {chunks}"

         with reporter.step("Try patch chunk of object and catch exception"):

View file

@@ -1,58 +1,61 @@
-import logging
-
 import allure
 import pytest
 from frostfs_testlib import reporter
-from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
-from frostfs_testlib.steps.cli.object import get_object_from_random_node, head_object, put_object_to_random_node
-from frostfs_testlib.steps.epoch import get_epoch
-from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.resources.common import STORAGE_GC_TIME
+from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED, OBJECT_NOT_FOUND
+from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
-from frostfs_testlib.testing.test_control import expect_not_raises
+from frostfs_testlib.testing.test_control import wait_for_success
+from frostfs_testlib.utils import datetime_utils
 from frostfs_testlib.utils.file_utils import TestFile

-from ...helpers.utility import wait_for_gc_pass_on_storage_nodes
+from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest

-logger = logging.getLogger("NeoLogger")
+
+@wait_for_success(datetime_utils.parse_time(STORAGE_GC_TIME) * 5, datetime_utils.parse_time(STORAGE_GC_TIME))
+def wait_for_object_status_change_to(status: str, grpc_client: GrpcClientWrapper, cid: str, oid: str, endpoint: str) -> None:
+    with pytest.raises(Exception, match=status):
+        grpc_client.object.head(cid, oid, endpoint)


 @pytest.mark.nightly
 @pytest.mark.sanity
 @pytest.mark.grpc_api
 class TestObjectApiLifetime(ClusterTestBase):
-    @allure.title("Object is removed when lifetime expired (obj_size={object_size})")
-    def test_object_api_lifetime(self, container: str, test_file: TestFile, default_wallet: WalletInfo):
+    @allure.title("Object is removed when lifetime expired (obj_size={object_size}, policy={container_request.short_name})")
+    @pytest.mark.parametrize(
+        "container_request",
+        [
+            ContainerRequest(DEFAULT_PLACEMENT_RULE, APE_EVERYONE_ALLOW_ALL, "REP 2"),
+            ContainerRequest(DEFAULT_EC_PLACEMENT_RULE, APE_EVERYONE_ALLOW_ALL, "EC 3.1"),
+        ],
+    )
+    def test_object_api_lifetime(self, grpc_client: GrpcClientWrapper, container: str, test_file: TestFile):
         """
         Test object deleted after expiration epoch.
         """
-        wallet = default_wallet
-
-        epoch = get_epoch(self.shell, self.cluster)
-
-        oid = put_object_to_random_node(wallet, test_file.path, container, self.shell, self.cluster, expire_at=epoch + 1)
-
-        with expect_not_raises():
-            head_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
+        with reporter.step("Get current epoch"):
+            current_epoch = self.get_epoch()
+            last_active_epoch = current_epoch + 1
+
+        with reporter.step("Put object to random node"):
+            oid = grpc_client.object.put_to_random_node(test_file, container, self.cluster, expire_at=last_active_epoch)
+
+        with reporter.step("Ensure that expiration of object has expected value"):
+            object_info: dict = grpc_client.object.head(container, oid, self.cluster.default_rpc_endpoint)
+            expiration_epoch = int(object_info["header"]["attributes"]["__SYSTEM__EXPIRATION_EPOCH"])
+            assert expiration_epoch == last_active_epoch, f"Expiration time set for object is not expected: {expiration_epoch}"

-        with reporter.step("Tick two epochs"):
+        with reporter.step("Tick two epoch for object expiration"):
             self.tick_epochs(2)

-        # Wait for GC, because object with expiration is counted as alive until GC removes it
-        wait_for_gc_pass_on_storage_nodes()
+        with reporter.step("Wait until GC marks object as 'already removed' or 'not found'"):
+            wait_for_object_status_change_to(
+                f"{OBJECT_ALREADY_REMOVED}|{OBJECT_NOT_FOUND}", grpc_client, container, oid, self.cluster.default_rpc_endpoint
+            )

-        with reporter.step("Check object deleted because it expires on epoch"):
-            with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
-                head_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
-            with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
-                get_object_from_random_node(wallet, container, oid, self.shell, self.cluster)
-
-        with reporter.step("Tick additional epoch"):
-            self.tick_epoch()
-
-        wait_for_gc_pass_on_storage_nodes()
-
-        with reporter.step("Check object deleted because it expires on previous epoch"):
-            with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
-                head_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
-            with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
-                get_object_from_random_node(wallet, container, oid, self.shell, self.cluster)
+        with reporter.step("Try to get object from random node and make sure it is really deleted"):
+            with pytest.raises(Exception, match=f"{OBJECT_ALREADY_REMOVED}|{OBJECT_NOT_FOUND}"):
+                grpc_client.object.get_from_random_node(container, oid, self.cluster)
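The `wait_for_success` decorator applied to `wait_for_object_status_change_to` retries the wrapped call until it stops raising, bounded by a total time budget and a polling interval; here the budget is five GC cycles and the interval is one GC cycle. A rough sketch of that retry behaviour (an approximation for illustration, not the frostfs-testlib implementation):

    import time
    from functools import wraps

    def wait_for_success(max_wait_time: int, interval: int):
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                deadline = time.monotonic() + max_wait_time
                while True:
                    try:
                        return func(*args, **kwargs)
                    except Exception:
                        if time.monotonic() >= deadline:
                            raise  # time budget exhausted, surface the last failure
                        time.sleep(interval)
            return wrapper
        return decorator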

View file

@@ -6,7 +6,7 @@ from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
 from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
 from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
 from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
-from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils.file_utils import TestFile

View file

@@ -1,5 +1,6 @@
 import logging
 import re
+from typing import Literal

 import allure
 import pytest
@@ -8,6 +9,7 @@ from frostfs_testlib.cli import FrostfsCli
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
 from frostfs_testlib.resources.error_patterns import OBJECT_IS_LOCKED
 from frostfs_testlib.shell import Shell
+from frostfs_testlib.shell.interfaces import CommandResult
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.test_control import expect_not_raises
@@ -16,20 +18,24 @@ from frostfs_testlib.utils.file_utils import TestFile, get_file_hash
 logger = logging.getLogger("NeoLogger")


+def parse_oid(response: CommandResult, response_type: Literal["tombstone", "patch"] = None) -> str:
+    if response_type == "tombstone":
+        id_str = response.stdout.split("\n")[1]
+        oid = id_str.split(":")[1]
+        return oid.strip()
+
+    if response_type == "patch":
+        return response.stdout.split(":")[1].strip()
+
+    id_str = response.stdout.strip().split("\n")[-2]
+    oid = id_str.split(":")[1]
+    return oid.strip()
+
+
 @pytest.mark.nightly
 @pytest.mark.grpc_api
 @pytest.mark.grpc_without_user
 class TestObjectApiWithoutUser(ClusterTestBase):
-    def _parse_oid(self, stdout: str) -> str:
-        id_str = stdout.strip().split("\n")[-2]
-        oid = id_str.split(":")[1]
-        return oid.strip()
-
-    def _parse_tombstone_oid(self, stdout: str) -> str:
-        id_str = stdout.split("\n")[1]
-        tombstone = id_str.split(":")[1]
-        return tombstone.strip()
-
     @pytest.fixture(scope="class")
     def cli_without_wallet(self, client_shell: Shell) -> FrostfsCli:
         return FrostfsCli(client_shell, FROSTFS_CLI_EXEC)
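The module-level `parse_oid` replaces the two private helpers and selects a parsing branch based on which command produced the output. Assuming CLI output shaped roughly as in the samples below (the exact format belongs to frostfs-cli; these strings and IDs are illustrative only), the three branches resolve like this:

    from dataclasses import dataclass

    @dataclass
    class FakeResult:
        # Minimal stand-in for CommandResult, just enough for parse_oid.
        stdout: str

    # Default branch (`object put`): the OID sits on the second-to-last line.
    put_out = FakeResult("...\n  ID: 6pJ9bLdHx\n  CID: HXSaMJXk2\n")
    assert parse_oid(put_out) == "6pJ9bLdHx"

    # "tombstone" branch (`object delete`): the OID sits on the second line.
    delete_out = FakeResult("Object removed successfully.\n  ID: 8vG2aQnT4\n")
    assert parse_oid(delete_out, response_type="tombstone") == "8vG2aQnT4"

    # "patch" branch (`object patch`): a single "Patched object ID: ..." line.
    patch_out = FakeResult("Patched object ID: 5fR7cXeW9")
    assert parse_oid(patch_out, response_type="patch") == "5fR7cXeW9"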
@@ -86,7 +92,7 @@ class TestObjectApiWithoutUser(ClusterTestBase):
             cli_without_wallet.container.search_node(rpc_endpoint, container, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)

     @allure.title("Put object into public container by native API with generate private key (obj_size={object_size})")
-    def test_put_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
+    def test_put_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
         """
         Validate `object put` into container with public ACL and flag `--generate-key`.
         """
@@ -96,13 +102,13 @@ class TestObjectApiWithoutUser(ClusterTestBase):
             result = cli_without_wallet.object.put(
                 rpc_endpoint,
                 container,
-                file_path,
+                test_file,
                 generate_key=True,
                 no_progress=True,
                 timeout=CLI_DEFAULT_TIMEOUT,
             )
-            oid = self._parse_oid(result.stdout)
+            oid = parse_oid(result)

         with reporter.step("List objects with generate key"):
             result = cli_without_wallet.container.list_objects(rpc_endpoint, container, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
@@ -112,24 +118,24 @@ class TestObjectApiWithoutUser(ClusterTestBase):
             assert oid in objects, objects

     @allure.title("Get public container object by native API with generate private key (obj_size={object_size})")
-    def test_get_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
+    def test_get_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
         """
         Validate `object get` for container with public ACL and flag `--generate-key`.
         """
-        expected_hash = get_file_hash(file_path)
+        expected_hash = get_file_hash(test_file)

         with reporter.step("Put object with generate key"):
             result = cli_without_wallet.object.put(
                 rpc_endpoint,
                 container,
-                file_path,
+                test_file,
                 generate_key=True,
                 no_progress=True,
                 timeout=CLI_DEFAULT_TIMEOUT,
             )
-            oid = self._parse_oid(result.stdout)
+            oid = parse_oid(result)

         with reporter.step("Get object with generate key"):
             with expect_not_raises():
@@ -137,19 +143,19 @@ class TestObjectApiWithoutUser(ClusterTestBase):
                 rpc_endpoint,
                 container,
                 oid,
-                file=file_path,
+                file=test_file,
                 generate_key=True,
                 no_progress=True,
                 timeout=CLI_DEFAULT_TIMEOUT,
             )

-        downloaded_hash = get_file_hash(file_path)
+        downloaded_hash = get_file_hash(test_file)

         with reporter.step("Validate downloaded file"):
             assert expected_hash == downloaded_hash

     @allure.title("Head public container object by native API with generate private key (obj_size={object_size})")
-    def test_head_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
+    def test_head_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
         """
         Validate `object head` for container with public ACL and flag `--generate-key`.
         """
@@ -158,20 +164,20 @@ class TestObjectApiWithoutUser(ClusterTestBase):
             result = cli_without_wallet.object.put(
                 rpc_endpoint,
                 container,
-                file_path,
+                test_file,
                 generate_key=True,
                 no_progress=True,
                 timeout=CLI_DEFAULT_TIMEOUT,
             )
-            oid = self._parse_oid(result.stdout)
+            oid = parse_oid(result)

         with reporter.step("Head object with generate key"):
             with expect_not_raises():
                 cli_without_wallet.object.head(rpc_endpoint, container, oid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)

     @allure.title("Delete public container object by native API with generate private key (obj_size={object_size})")
-    def test_delete_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
+    def test_delete_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
         """
         Validate `object delete` for container with public ACL and flag `--generate key`.
         """
@@ -180,19 +186,19 @@ class TestObjectApiWithoutUser(ClusterTestBase):
             result = cli_without_wallet.object.put(
                 rpc_endpoint,
                 container,
-                file_path,
+                test_file,
                 generate_key=True,
                 no_progress=True,
                 timeout=CLI_DEFAULT_TIMEOUT,
             )
-            oid = self._parse_oid(result.stdout)
+            oid = parse_oid(result)

         with reporter.step("Delete object with generate key"):
             with expect_not_raises():
                 result = cli_without_wallet.object.delete(rpc_endpoint, container, oid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
-                oid = self._parse_tombstone_oid(result.stdout)
+                oid = parse_oid(result, response_type="tombstone")

         with reporter.step("Head object with generate key"):
             result = cli_without_wallet.object.head(
@@ -207,8 +213,37 @@ class TestObjectApiWithoutUser(ClusterTestBase):
             object_type = re.search(r"(?<=type: )tombstone", result.stdout, re.IGNORECASE).group()
             assert object_type == "TOMBSTONE", object_type

+    @allure.title("Patch object in public container with generate private key (obj_size={object_size})")
+    def test_patch_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
+        with reporter.step("Put object with generate key"):
+            result = cli_without_wallet.object.put(
+                rpc_endpoint,
+                container,
+                test_file,
+                generate_key=True,
+                no_progress=True,
+                timeout=CLI_DEFAULT_TIMEOUT,
+            )
+            oid = parse_oid(result)
+
+        with reporter.step("Patch object with generate key"):
+            with expect_not_raises():
+                result = cli_without_wallet.object.patch(
+                    rpc_endpoint,
+                    container,
+                    oid,
+                    ["0:500"],
+                    [test_file],
+                    generate_key=True,
+                    timeout=CLI_DEFAULT_TIMEOUT,
+                )
+            patched_oid = parse_oid(result, response_type="patch")
+            assert oid != patched_oid, "Patched object must have new object id"
+
     @allure.title("Lock public container object by native API with generate private key (obj_size={object_size})")
-    def test_lock_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
+    def test_lock_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
         """
         Validate `object lock` for container with public ACL and flag `--generate-key`.
         Attempt to delete the locked object.
@@ -218,13 +253,13 @@ class TestObjectApiWithoutUser(ClusterTestBase):
             result = cli_without_wallet.object.put(
                 rpc_endpoint,
                 container,
-                file_path,
+                test_file,
                 generate_key=True,
                 no_progress=True,
                 timeout=CLI_DEFAULT_TIMEOUT,
             )
-            oid = self._parse_oid(result.stdout)
+            oid = parse_oid(result)

         with reporter.step("Lock object with generate key"):
             with expect_not_raises():
@@ -248,7 +283,7 @@ class TestObjectApiWithoutUser(ClusterTestBase):
             )

     @allure.title("Search public container objects by native API with generate private key (obj_size={object_size})")
-    def test_search_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
+    def test_search_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
         """
         Validate `object search` for container with public ACL and flag `--generate-key`.
         """
@@ -257,13 +292,13 @@ class TestObjectApiWithoutUser(ClusterTestBase):
             result = cli_without_wallet.object.put(
                 rpc_endpoint,
                 container,
-                file_path,
+                test_file,
                 generate_key=True,
                 no_progress=True,
                 timeout=CLI_DEFAULT_TIMEOUT,
             )
-            oid = self._parse_oid(result.stdout)
+            oid = parse_oid(result)

         with reporter.step("Object search with generate key"):
             with expect_not_raises():
@@ -274,7 +309,7 @@ class TestObjectApiWithoutUser(ClusterTestBase):
             assert oid in object_ids

     @allure.title("Get range of public container object by native API with generate private key (obj_size={object_size})")
-    def test_range_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
+    def test_range_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
         """
         Validate `object range` for container with public ACL and `--generate-key`.
         """
@@ -283,13 +318,13 @@ class TestObjectApiWithoutUser(ClusterTestBase):
             result = cli_without_wallet.object.put(
                 rpc_endpoint,
                 container,
-                file_path,
+                test_file,
                 generate_key=True,
                 no_progress=True,
                 timeout=CLI_DEFAULT_TIMEOUT,
             )
-            oid = self._parse_oid(result.stdout)
+            oid = parse_oid(result)

         with reporter.step("Get range of object with generate key"):
             with expect_not_raises():
@@ -298,13 +333,13 @@ class TestObjectApiWithoutUser(ClusterTestBase):
                 container,
                 oid,
                 "0:10",
-                file=file_path,
+                file=test_file,
                 generate_key=True,
                 timeout=CLI_DEFAULT_TIMEOUT,
             )

     @allure.title("Get hash of public container object by native API with generate private key (obj_size={object_size})")
-    def test_hash_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
+    def test_hash_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
         """
         Validate `object hash` for container with public ACL and `--generate-key`.
         """
@@ -313,13 +348,13 @@ class TestObjectApiWithoutUser(ClusterTestBase):
             result = cli_without_wallet.object.put(
                 rpc_endpoint,
                 container,
-                file_path,
+                test_file,
                 generate_key=True,
                 no_progress=True,
                 timeout=CLI_DEFAULT_TIMEOUT,
             )
-            oid = self._parse_oid(result.stdout)
+            oid = parse_oid(result)

         with reporter.step("Get range hash of object with generate key"):
             with expect_not_raises():
@@ -333,7 +368,7 @@ class TestObjectApiWithoutUser(ClusterTestBase):
             )

     @allure.title("Get public container object nodes by native API with generate private key (obj_size={object_size})")
-    def test_nodes_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
+    def test_nodes_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
         """
         Validate `object nodes` for container with public ACL and `--generate-key`.
         """
@@ -342,13 +377,13 @@ class TestObjectApiWithoutUser(ClusterTestBase):
             result = cli_without_wallet.object.put(
                 rpc_endpoint,
                 container,
-                file_path,
+                test_file,
                 no_progress=True,
                 generate_key=True,
                 timeout=CLI_DEFAULT_TIMEOUT,
             )
-            oid = self._parse_oid(result.stdout)
+            oid = parse_oid(result)

         with reporter.step("Configure frostfs-cli for alive remote node"):
             alive_node = self.cluster.cluster_nodes[0]

View file

@@ -7,16 +7,16 @@ import yaml
 from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
 from frostfs_testlib.cli.netmap_parser import NetmapParser
+from frostfs_testlib.clients import AwsCliClient, S3ClientWrapper
+from frostfs_testlib.clients.s3 import BucketContainerResolver, VersioningStatus
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC
 from frostfs_testlib.resources.common import COMPLEX_OBJECT_CHUNKS_COUNT, COMPLEX_OBJECT_TAIL_SIZE, HOSTING_CONFIG_FILE, MORPH_BLOCK_TIME
-from frostfs_testlib.s3 import AwsCliClient, S3ClientWrapper
-from frostfs_testlib.s3.interfaces import BucketContainerResolver, VersioningStatus
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode
 from frostfs_testlib.storage.controllers import ClusterStateController
 from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk
-from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.test_control import wait_for_success
 from frostfs_testlib.utils import datetime_utils
@@ -145,13 +145,13 @@ class TestECReplication(ClusterTestBase):
     @allure.title("Create container with EC policy (size={object_size})")
     @requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
     def test_create_container_with_ec_policy(
-        self, container: str, rep_count: int, grpc_client: GrpcClientWrapper, test_file: TestFile
+        self, container: str, rep_count: int, grpc_client: GrpcClientWrapper, remote_grpc_client: GrpcClientWrapper, test_file: TestFile
     ) -> None:
         with reporter.step("Put object in container."):
             oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Check replication chunks."):
-            assert self.check_replication(rep_count, grpc_client, container, oid)
+            assert self.check_replication(rep_count, remote_grpc_client, container, oid)

     @allure.title("Lose node with chunk data")
     @pytest.mark.failover
@@ -159,6 +159,7 @@ class TestECReplication(ClusterTestBase):
     def test_lose_node_with_data_chunk(
         self,
         grpc_client: GrpcClientWrapper,
+        remote_grpc_client: GrpcClientWrapper,
         simple_object_size: ObjectSize,
         cluster_state_controller: ClusterStateController,
         container: str,
@@ -169,10 +170,10 @@ class TestECReplication(ClusterTestBase):
             oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Check chunk replication on 4 nodes."):
-            assert self.check_replication(4, grpc_client, container, oid)
+            assert self.check_replication(4, remote_grpc_client, container, oid)

         with reporter.step("Search node data chunk"):
-            chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid)
+            chunk = remote_grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid)
             chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunk)

         with reporter.step("Stop node with data chunk."):
@@ -184,7 +185,7 @@ class TestECReplication(ClusterTestBase):
         with reporter.step("Start stopped node, and check replication chunks."):
             cluster_state_controller.start_node_host(chunk_node[0])
-            self.wait_replication(4, grpc_client, container, oid)
+            self.wait_replication(4, remote_grpc_client, container, oid)

     @allure.title("Lose node with chunk parity")
     @pytest.mark.failover
@@ -192,6 +193,7 @@ class TestECReplication(ClusterTestBase):
     def test_lose_node_with_parity_chunk(
         self,
         grpc_client: GrpcClientWrapper,
+        remote_grpc_client: GrpcClientWrapper,
         simple_object_size: ObjectSize,
         cluster_state_controller: ClusterStateController,
         container: str,
@@ -202,11 +204,11 @@ class TestECReplication(ClusterTestBase):
             oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Check chunk replication on 4 nodes."):
-            assert self.check_replication(4, grpc_client, container, oid)
+            assert self.check_replication(4, remote_grpc_client, container, oid)

         with reporter.step("Search node with parity chunk"):
-            chunk = grpc_client.object.chunks.get_parity(self.cluster.default_rpc_endpoint, container, oid=oid)
-            chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunk)[0]
+            chunk = remote_grpc_client.object.chunks.get_parity(self.cluster.default_rpc_endpoint, container, oid=oid)
+            chunk_node = remote_grpc_client.object.chunks.get_chunk_node(self.cluster, chunk)[0]

         with reporter.step("Stop node parity chunk."):
             cluster_state_controller.stop_node_host(chunk_node, "hard")
@@ -217,7 +219,7 @@ class TestECReplication(ClusterTestBase):
         with reporter.step("Start stoped node, and check replication chunks."):
             cluster_state_controller.start_node_host(chunk_node)
-            self.wait_replication(4, grpc_client, container, oid)
+            self.wait_replication(4, remote_grpc_client, container, oid)

     @allure.title("Lose nodes with chunk data and parity")
     @pytest.mark.failover
@@ -225,6 +227,7 @@ class TestECReplication(ClusterTestBase):
     def test_lose_nodes_data_chunk_and_parity(
         self,
         grpc_client: GrpcClientWrapper,
+        remote_grpc_client: GrpcClientWrapper,
         simple_object_size: ObjectSize,
         cluster_state_controller: ClusterStateController,
         container: str,
@@ -235,13 +238,13 @@ class TestECReplication(ClusterTestBase):
             oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Check count chunks, expect 4."):
-            assert self.check_replication(4, grpc_client, container, oid)
+            assert self.check_replication(4, remote_grpc_client, container, oid)

         with reporter.step("Search node data chunk and node parity chunk"):
-            data_chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid)
-            data_chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk)[0]
-            parity_chunk = grpc_client.object.chunks.get_parity(self.cluster.default_rpc_endpoint, container, oid=oid)
-            parity_chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, parity_chunk)[0]
+            data_chunk = remote_grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid)
+            data_chunk_node = remote_grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk)[0]
+            parity_chunk = remote_grpc_client.object.chunks.get_parity(self.cluster.default_rpc_endpoint, container, oid=oid)
+            parity_chunk_node = remote_grpc_client.object.chunks.get_chunk_node(self.cluster, parity_chunk)[0]

         with reporter.step("Stop node with data chunk."):
             cluster_state_controller.stop_node_host(data_chunk_node, "hard")
@@ -252,7 +255,7 @@ class TestECReplication(ClusterTestBase):
         with reporter.step("Start stopped host and check chunks."):
             cluster_state_controller.start_node_host(data_chunk_node)
-            self.wait_replication(4, grpc_client, container, oid)
+            self.wait_replication(4, remote_grpc_client, container, oid)

         with reporter.step("Stop node with parity chunk and one all node."):
             cluster_state_controller.stop_node_host(data_chunk_node, "hard")
@@ -264,7 +267,7 @@ class TestECReplication(ClusterTestBase):
         with reporter.step("Start stopped nodes and check replication chunk."):
             cluster_state_controller.start_stopped_hosts()
-            self.wait_replication(4, grpc_client, container, oid)
+            self.wait_replication(4, remote_grpc_client, container, oid)

     @allure.title("Policer work with chunk")
     @pytest.mark.failover
@@ -273,6 +276,7 @@ class TestECReplication(ClusterTestBase):
         self,
         simple_object_size: ObjectSize,
         grpc_client: GrpcClientWrapper,
+        remote_grpc_client: GrpcClientWrapper,
         cluster_state_controller: ClusterStateController,
         container: str,
         include_excluded_nodes: None,
@@ -282,12 +286,12 @@ class TestECReplication(ClusterTestBase):
             oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Check count chunks nodes on 3."):
-            assert self.check_replication(3, grpc_client, container, oid)
+            assert self.check_replication(3, remote_grpc_client, container, oid)

         with reporter.step("Search node with chunk."):
-            data_chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid)
-            node_data_chunk = grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk)[0]
-            first_all_chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
+            data_chunk = remote_grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid)
+            node_data_chunk = remote_grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk)[0]
+            first_all_chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)

         with reporter.step("Remove chunk node from network map"):
             cluster_state_controller.remove_node_from_netmap([node_data_chunk.storage_node])
@@ -300,10 +304,10 @@ class TestECReplication(ClusterTestBase):
             node = grpc_client.object.chunks.search_node_without_chunks(
                 first_all_chunks, self.cluster, alive_node.storage_node.get_rpc_endpoint()
             )[0]
-            self.wait_replication(3, grpc_client, container, oid)
+            self.wait_replication(3, remote_grpc_client, container, oid)

         with reporter.step("Get new chunks"):
-            second_all_chunks = grpc_client.object.chunks.get_all(node.storage_node.get_rpc_endpoint(), container, oid)
+            second_all_chunks = remote_grpc_client.object.chunks.get_all(node.storage_node.get_rpc_endpoint(), container, oid)

         with reporter.step("Check that oid no change."):
             assert [chunk for chunk in second_all_chunks if data_chunk.object_id == chunk.object_id]
@@ -311,11 +315,17 @@ class TestECReplication(ClusterTestBase):
         with reporter.step("Include node in netmap"):
             cluster_state_controller.include_node_to_netmap(node_data_chunk.storage_node, alive_node.storage_node)
-            self.wait_sync_count_chunks_nodes(grpc_client, container, oid, 3)
+            self.wait_sync_count_chunks_nodes(remote_grpc_client, container, oid, 3)

     @allure.title("EC X.Y combinations (nodes={node_count},policy={ec_policy},size={object_size})")
     def test_create_container_with_difference_count_nodes(
-        self, frostfs_cli: FrostfsCli, node_count: int, ec_policy: str, object_size: ObjectSize, grpc_client: GrpcClientWrapper
+        self,
+        frostfs_cli: FrostfsCli,
+        node_count: int,
+        ec_policy: str,
+        object_size: ObjectSize,
+        grpc_client: GrpcClientWrapper,
+        remote_grpc_client: GrpcClientWrapper,
     ) -> None:
         with reporter.step("Create container."):
             expected_chunks = int(ec_policy.split(" ")[1].split(".")[0]) + int(ec_policy.split(" ")[1].split(".")[1])
@@ -336,7 +346,7 @@ class TestECReplication(ClusterTestBase):
             oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)

         with reporter.step("Check count object chunks."):
-            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
+            chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
             assert len(chunks) == expected_chunks

         with reporter.step("get object and check hash."):
@@ -345,13 +355,15 @@ class TestECReplication(ClusterTestBase):
     @allure.title("Request PUT with copies_number flag")
     @requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
-    def test_put_object_with_copies_number(self, container: str, grpc_client: GrpcClientWrapper, simple_object_size: ObjectSize) -> None:
+    def test_put_object_with_copies_number(
+        self, container: str, grpc_client: GrpcClientWrapper, remote_grpc_client: GrpcClientWrapper, simple_object_size: ObjectSize
+    ) -> None:
         with reporter.step("Put object in container with copies number = 1"):
             test_file = generate_file(simple_object_size.value)
             oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint, copies_number=1)

         with reporter.step("Check that count chunks > 1."):
-            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
+            chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
             assert len(chunks) > 1

     @allure.title("Request PUT and 1 node off")
@@ -373,13 +385,15 @@ class TestECReplication(ClusterTestBase):
     @allure.title("Request PUT (size={object_size})")
     @requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
-    def test_put_object_with_ec_cnr(self, container: str, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
+    def test_put_object_with_ec_cnr(
+        self, container: str, grpc_client: GrpcClientWrapper, remote_grpc_client: GrpcClientWrapper, object_size: ObjectSize
+    ) -> None:
         with reporter.step("Put object in container"):
             test_file = generate_file(object_size.value)
             oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Get chunks object."):
-            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
+            chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)

         with reporter.step("Check header chunks object"):
             for chunk in chunks:
@@ -390,14 +404,16 @@ class TestECReplication(ClusterTestBase):
     @allure.title("Request GET (size={object_size})")
     @requires_container(PUBLIC_WITH_POLICY("EC 2.1 CBF 1"))
-    def test_get_object_in_ec_cnr(self, container: str, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
+    def test_get_object_in_ec_cnr(
+        self, container: str, grpc_client: GrpcClientWrapper, remote_grpc_client: GrpcClientWrapper, object_size: ObjectSize
+    ) -> None:
         with reporter.step("Put object in container"):
             test_file = generate_file(object_size.value)
             hash_origin_file = get_file_hash(test_file)
             oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Get id all chunks."):
-            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
+            chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)

         with reporter.step("Search chunk node and not chunks node."):
             chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunks[0])[0]
@@ -428,26 +444,30 @@ class TestECReplication(ClusterTestBase):
     @allure.title("Request SEARCH check valid chunk id (size={object_size})")
     @requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
-    def test_search_object_in_ec_cnr_chunk_id(self, container: str, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
+    def test_search_object_in_ec_cnr_chunk_id(
+        self, container: str, grpc_client: GrpcClientWrapper, remote_grpc_client: GrpcClientWrapper, object_size: ObjectSize
+    ) -> None:
         with reporter.step("Put object in container"):
             test_file = generate_file(object_size.value)
             oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Search operation object"):
             search_output = grpc_client.object.search(container, self.cluster.default_rpc_endpoint)
-            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
+            chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
             for chunk in chunks:
                 assert chunk.object_id in search_output

     @allure.title("Request SEARCH check no chunk index info (size={object_size})")
     @requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
-    def test_search_object_in_ec_cnr(self, container: str, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
+    def test_search_object_in_ec_cnr(
+        self, container: str, grpc_client: GrpcClientWrapper, remote_grpc_client: GrpcClientWrapper, object_size: ObjectSize
+    ) -> None:
         with reporter.step("Put object in container"):
             test_file = generate_file(object_size.value)
             oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Search operation all chunk"):
-            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
+            chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
             for chunk in chunks:
                 chunk_search = grpc_client.object.search(container, self.cluster.default_rpc_endpoint, oid=chunk.object_id)
                 assert "index" not in chunk_search
@@ -456,14 +476,19 @@ class TestECReplication(ClusterTestBase):
     @pytest.mark.failover
     @requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
     def test_delete_object_in_ec_cnr(
-        self, container: str, grpc_client: GrpcClientWrapper, object_size: ObjectSize, cluster_state_controller: ClusterStateController
+        self,
+        container: str,
+        grpc_client: GrpcClientWrapper,
+        remote_grpc_client: GrpcClientWrapper,
+        object_size: ObjectSize,
+        cluster_state_controller: ClusterStateController,
     ) -> None:
         with reporter.step("Put object in container."):
             test_file = generate_file(object_size.value)
             oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)

         with reporter.step("Check object chunks nodes."):
-            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
+            chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
             replication_count = 3 if object_size.name == "simple" else 3 * 4
             assert len(chunks) == replication_count
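The `replication_count` expression extends the same arithmetic to complex objects: under `EC 2.1` every stored object yields 3 chunks (2 data + 1 parity), and a complex object is split into parts that are each erasure-coded separately, so with the 4 parts these tests assume (presumably tied to COMPLEX_OBJECT_CHUNKS_COUNT) the total is 3 × 4 = 12. A small sketch of that assumption:

    CHUNKS_PER_OBJECT = 3      # EC 2.1: 2 data + 1 parity
    COMPLEX_OBJECT_PARTS = 4   # part count assumed by these tests

    def expected_replication(object_kind: str) -> int:
        if object_kind == "simple":
            return CHUNKS_PER_OBJECT
        # Each part of a complex object is erasure-coded on its own.
        return CHUNKS_PER_OBJECT * COMPLEX_OBJECT_PARTS

    assert expected_replication("simple") == 3
    assert expected_replication("complex") == 12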
@ -479,7 +504,7 @@ class TestECReplication(ClusterTestBase):
oid_second = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint) oid_second = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Check second object chunks nodes."): with reporter.step("Check second object chunks nodes."):
chunks_second_object = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid_second) chunks_second_object = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid_second)
assert len(chunks_second_object) == replication_count assert len(chunks_second_object) == replication_count
with reporter.step("Stop nodes with chunk."): with reporter.step("Stop nodes with chunk."):
@ -503,6 +528,7 @@ class TestECReplication(ClusterTestBase):
container: str, container: str,
test_file: TestFile, test_file: TestFile,
grpc_client: GrpcClientWrapper, grpc_client: GrpcClientWrapper,
remote_grpc_client: GrpcClientWrapper,
frostfs_cli: FrostfsCli, frostfs_cli: FrostfsCli,
object_size: ObjectSize, object_size: ObjectSize,
cluster_state_controller: ClusterStateController, cluster_state_controller: ClusterStateController,
@ -512,7 +538,7 @@ class TestECReplication(ClusterTestBase):
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint) oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Check object chunks nodes."): with reporter.step("Check object chunks nodes."):
chunks_object = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid) chunks_object = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
replication_count = 3 if object_size.name == "simple" else 3 * 4 replication_count = 3 if object_size.name == "simple" else 3 * 4
assert len(chunks_object) == replication_count assert len(chunks_object) == replication_count
@ -609,20 +635,23 @@ class TestECReplication(ClusterTestBase):
assert len(container_nodes) == expected_nodes assert len(container_nodes) == expected_nodes
@allure.title("Create container with EC policy and FILTER") @allure.title("Create container with EC policy and FILTER")
def test_create_container_with_filter(self, grpc_client: GrpcClientWrapper, simple_object_size: ObjectSize) -> None: @requires_container(PUBLIC_WITH_POLICY("EC 1.1 IN RUS SELECT 2 FROM RU AS RUS FILTER Country EQ Russia AS RU"))
with reporter.step("Create Container."): def test_create_container_with_filter(
policy = "EC 1.1 IN RUS SELECT 2 FROM RU AS RUS FILTER Country EQ Russia AS RU" self,
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy=policy, await_mode=True) grpc_client: GrpcClientWrapper,
remote_grpc_client: GrpcClientWrapper,
simple_object_size: ObjectSize,
container: str,
) -> None:
with reporter.step("Put object in container."): with reporter.step("Put object in container."):
test_file = generate_file(simple_object_size.value) test_file = generate_file(simple_object_size.value)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint) oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Check object is decomposed exclusively on Russian nodes"): with reporter.step("Check object is decomposed exclusively on Russian nodes"):
data_chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, cid, oid=oid) data_chunk = remote_grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid)
parity_chunk = grpc_client.object.chunks.get_parity(self.cluster.default_rpc_endpoint, cid, oid=oid) parity_chunk = remote_grpc_client.object.chunks.get_parity(self.cluster.default_rpc_endpoint, container, oid=oid)
node_data_chunk = grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk) node_data_chunk = remote_grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk)
node_parity_chunk = grpc_client.object.chunks.get_chunk_node(self.cluster, parity_chunk) node_parity_chunk = remote_grpc_client.object.chunks.get_chunk_node(self.cluster, parity_chunk)
for node in [node_data_chunk[1], node_parity_chunk[1]]: for node in [node_data_chunk[1], node_parity_chunk[1]]:
assert "Russia" in node.country assert "Russia" in node.country
@ -635,6 +664,7 @@ class TestECReplication(ClusterTestBase):
container: str, container: str,
restore_nodes_shards_mode: None, restore_nodes_shards_mode: None,
frostfs_cli: FrostfsCli, frostfs_cli: FrostfsCli,
remote_frostfs_cli: FrostfsCli,
grpc_client: GrpcClientWrapper, grpc_client: GrpcClientWrapper,
max_object_size: int, max_object_size: int,
type: str, type: str,
@ -645,7 +675,7 @@ class TestECReplication(ClusterTestBase):
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint) oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Get object chunks."): with reporter.step("Get object chunks."):
chunk = get_chunk(self, frostfs_cli, container, oid, self.cluster.default_rpc_endpoint) chunk = get_chunk(self, remote_frostfs_cli, container, oid, self.cluster.default_rpc_endpoint)
chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunk) chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunk)
frostfs_node_cli = self.get_node_cli(chunk_node[0], config=chunk_node[0].storage_node.get_remote_wallet_config_path()) frostfs_node_cli = self.get_node_cli(chunk_node[0], config=chunk_node[0].storage_node.get_remote_wallet_config_path())
@ -690,7 +720,7 @@ class TestECReplication(ClusterTestBase):
test_file: TestFile, test_file: TestFile,
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
bucket_container_resolver: BucketContainerResolver, bucket_container_resolver: BucketContainerResolver,
grpc_client: GrpcClientWrapper, remote_grpc_client: GrpcClientWrapper,
object_size: ObjectSize, object_size: ObjectSize,
) -> None: ) -> None:
with reporter.step("Create bucket with EC location constrain"): with reporter.step("Create bucket with EC location constrain"):
@@ -706,25 +736,27 @@ class TestECReplication(ClusterTestBase):
with reporter.step("Watch replication count chunks"): with reporter.step("Watch replication count chunks"):
cid = bucket_container_resolver.resolve(self.cluster.cluster_nodes[0], bucket) cid = bucket_container_resolver.resolve(self.cluster.cluster_nodes[0], bucket)
chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, bucket_object) chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, bucket_object)
expect_chunks = 4 if object_size.name == "simple" else 16 expect_chunks = 4 if object_size.name == "simple" else 16
assert len(chunks) == expect_chunks assert len(chunks) == expect_chunks
@allure.title("Replication chunk after drop (size={object_size})") @allure.title("Replication chunk after drop (size={object_size})")
@requires_container(PUBLIC_WITH_POLICY("EC 2.1 CBF 1")) @requires_container(PUBLIC_WITH_POLICY("EC 2.1 CBF 1"))
def test_drop_chunk_and_replication(self, test_file: TestFile, container: str, grpc_client: GrpcClientWrapper, rep_count: int) -> None: def test_drop_chunk_and_replication(
self, test_file: TestFile, container: str, grpc_client: GrpcClientWrapper, remote_grpc_client: GrpcClientWrapper, rep_count: int
) -> None:
with reporter.step("Put object"): with reporter.step("Put object"):
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint) oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Get all chunks"): with reporter.step("Get all chunks"):
data_chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid) data_chunk = remote_grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid)
with reporter.step("Search chunk node"): with reporter.step("Search chunk node"):
chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk) chunk_node = remote_grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk)
shell_chunk_node = chunk_node[0].host.get_shell() shell_chunk_node = chunk_node[0].host.get_shell()
with reporter.step("Get replication count"): with reporter.step("Get replication count"):
assert self.check_replication(rep_count, grpc_client, container, oid) assert self.check_replication(rep_count, remote_grpc_client, container, oid)
with reporter.step("Delete chunk"): with reporter.step("Delete chunk"):
frostfs_node_cli = FrostfsCli( frostfs_node_cli = FrostfsCli(
@@ -735,4 +767,4 @@ class TestECReplication(ClusterTestBase):
            frostfs_node_cli.control.drop_objects(chunk_node[0].storage_node.get_control_endpoint(), f"{container}/{data_chunk.object_id}")

        with reporter.step("Wait replication count after drop one chunk"):
            self.wait_replication(rep_count, remote_grpc_client, container, oid)
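A note on the policy above, stated as an assumption about EC placement semantics:

# "EC 2.1 CBF 1" -> 2 data chunks + 1 parity chunk per object, container backup factor 1:
# losing any single chunk leaves two, which is enough to reconstruct the third,
# so wait_replication should observe the copy count recover after the drop.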


@@ -5,15 +5,16 @@ import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import delete_container
from frostfs_testlib.steps.cli.object import delete_object, head_object, put_object
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.failover_utils import wait_object_replication
from frostfs_testlib.utils.file_utils import TestFile

from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container

logger = logging.getLogger("NeoLogger")
@@ -21,70 +22,49 @@ OBJECT_ATTRIBUTES = {"common_key": "common_value"}
WAIT_FOR_REPLICATION = 60

# Adding failover mark because it may make cluster unhealthy
@pytest.mark.failover
@pytest.mark.replication
class TestReplication(ClusterTestBase):
    @allure.title("Replication (obj_size={object_size})")
    @requires_container(PUBLIC_WITH_POLICY("REP %NODE_COUNT% CBF 1", short_name="Public_all_except_one_node"))
    def test_replication(
        self,
        wallet: WalletInfo,
        client_shell: Shell,
        cluster: Cluster,
        container: str,
        test_file: TestFile,
        cluster_state_controller: ClusterStateController,
    ):
        nodes_count = len(cluster.cluster_nodes)
        node_for_rep = random.choice(cluster.cluster_nodes)
        alive_nodes = [node for node in cluster.cluster_nodes if node != node_for_rep]
        with reporter.step("Stop container node host"):
            cluster_state_controller.stop_node_host(node_for_rep, mode="hard")

        with reporter.step("Put object"):
            oid = put_object(
                wallet=wallet,
                path=test_file,
                cid=container,
                shell=client_shell,
                attributes=OBJECT_ATTRIBUTES,
                copies_number=nodes_count - 1,
                endpoint=random.choice(alive_nodes).storage_node.get_rpc_endpoint(),
                timeout="45s",
            )

        with reporter.step("Start container node host"):
            cluster_state_controller.start_node_host(node_for_rep)

        with reporter.step("Wait for replication"):
            object_nodes = wait_object_replication(container, oid, nodes_count, client_shell, self.cluster.storage_nodes)
with reporter.step("Check attributes"): with reporter.step("Check attributes"):
for node in object_nodes: for node in object_nodes:
header_info = head_object( header_info = head_object(wallet, container, oid, client_shell, node.get_rpc_endpoint(), is_direct=True).get("header", {})
wallet=default_wallet, attributes = header_info.get("attributes", {})
oid=oid,
cid=cid,
shell=self.shell,
endpoint=node.get_rpc_endpoint(),
is_direct=True,
)["header"]
attributes = header_info["attributes"]
for attribute_key, attribute_value in OBJECT_ATTRIBUTES.items(): for attribute_key, attribute_value in OBJECT_ATTRIBUTES.items():
assert attribute_key in attributes, f"{attribute_key} not found in {header_info}" assert attribute_key in attributes, f"{attribute_key} not found in {header_info}"
assert header_info["attributes"].get(attribute_key) == str(attribute_value), ( assert header_info["attributes"].get(attribute_key) == str(attribute_value), (
@@ -93,19 +73,6 @@ class TestReplication(ClusterTestBase):
f"expected attribute value: {attribute_value}" f"expected attribute value: {attribute_value}"
) )
# TODO: Research why this fails with reporter.step("Cleanup"):
# with reporter.step("Cleanup"): delete_object(wallet, container, oid, client_shell, cluster.default_rpc_endpoint)
# delete_object( delete_container(wallet, container, client_shell, cluster.default_rpc_endpoint)
# wallet=default_wallet,
# cid=cid,
# oid=oid,
# shell=client_shell,
# endpoint=cluster.default_rpc_endpoint,
# )
# delete_container(
# wallet=default_wallet,
# cid=cid,
# shell=client_shell,
# endpoint=cluster.default_rpc_endpoint,
# )


@@ -5,7 +5,7 @@ import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.steps.acl import bearer_token_base64_from_file
from frostfs_testlib.steps.http_gate import upload_via_http_gate_curl, verify_object_hash
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize


@@ -1,9 +1,8 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.epoch import get_epoch
from frostfs_testlib.steps.http_gate import (
    attr_into_header,
    get_object_by_attr_and_verify_hashes,
    get_via_http_curl,
@@ -19,55 +18,12 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import TestFile, generate_file, get_file_hash

from ....helpers.container_request import REP_2_2_2_PUBLIC, requires_container
from ....helpers.utility import wait_for_gc_pass_on_storage_nodes

OBJECT_NOT_FOUND_ERROR = "not found"

@allure.link(
    "https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#frostfs-http-gateway",
    name="frostfs-http-gateway",


@@ -6,7 +6,7 @@ import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.cli.container import delete_container, list_containers, wait_for_container_deletion
from frostfs_testlib.steps.cli.object import delete_object
from frostfs_testlib.steps.http_gate import (
    attr_into_str_header_curl,
    get_object_by_attr_and_verify_hashes,
    try_to_get_object_and_expect_error,


@@ -3,24 +3,26 @@ import logging
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.clients import S3ClientWrapper
from frostfs_testlib.steps import s3_helper
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.steps.http_gate import (
    assert_hashes_are_equal,
    get_object_by_attr_and_verify_hashes,
    get_via_http_gate,
    try_to_get_object_via_passed_request_and_expect_error,
    verify_object_hash,
)
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import TestFile, generate_file, split_file

from ....helpers.container_request import REP_2_1_4_PUBLIC, requires_container

logger = logging.getLogger("NeoLogger")

PART_SIZE = 5 * 1024 * 1024

@pytest.mark.nightly
@pytest.mark.sanity
@@ -66,6 +68,7 @@ class Test_http_object(ClusterTestBase):
                cluster=self.cluster,
                attributes=f"{key_value1},{key_value2}",
            )

        with reporter.step("Get object and verify hashes [ get/$CID/$OID ]"):
            verify_object_hash(
                oid=oid,
@@ -91,25 +94,19 @@ class Test_http_object(ClusterTestBase):
            )

        with reporter.step("Download the object with attribute [get_by_attribute/$CID/chapter1/peace]"):
            get_object_by_attr_and_verify_hashes(oid, test_file, container, attrs, self.cluster.cluster_nodes[0])

        with reporter.step("[Negative] try to get object: get_by_attribute/$CID/$OID"):
            request = f"/get_by_attribute/{container}/{oid}"
            try_to_get_object_via_passed_request_and_expect_error(
                container,
                oid,
                self.cluster.cluster_nodes[0],
                error_pattern=expected_err_msg,
                http_request_path=request,
            )
@allure.title("Put over s3, Get over HTTP with bucket name and key (object_size={object_size})") @allure.title("Put object over S3, get over HTTP with bucket name and key (s3_client={s3_client}, object_size={object_size})")
@pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True)
def test_object_put_get_bucketname_key(self, test_file: TestFile, s3_client: S3ClientWrapper): def test_object_put_get_bucketname_key(self, test_file: TestFile, s3_client: S3ClientWrapper):
""" """
Test that object can be put using s3-gateway interface and got via HTTP with bucket name and object key. Test that object can be put using s3-gateway interface and got via HTTP with bucket name and object key.
@@ -125,17 +122,55 @@ class Test_http_object(ClusterTestBase):
        Hashes must be the same.
        """
        object_key = s3_helper.object_key_from_file_path(test_file)

        with reporter.step("Create public bucket"):
            bucket = s3_client.create_bucket(acl="public-read-write")

        with reporter.step("Put object"):
            s3_client.put_object(bucket, test_file, object_key)

        with reporter.step("Get object via S3 gate"):
            obj_s3 = s3_client.get_object(bucket, object_key)

        with reporter.step("Get object via HTTP gate"):
            obj_http = get_via_http_gate(bucket, object_key, node=self.cluster.cluster_nodes[0])

        with reporter.step("Make sure the hashes of both objects are the same"):
            assert_hashes_are_equal(test_file.path, obj_http, obj_s3)
@allure.title("Put multipart object over S3, get over HTTP with bucket name and key (s3_client={s3_client})")
def test_object_put_get_bucketname_key_multipart(self, s3_client: S3ClientWrapper):
parts = []
parts_count = 5
original_size = PART_SIZE * parts_count
with reporter.step("Create public container"):
bucket = s3_client.create_bucket(acl="public-read-write")
with reporter.step("Generate original object and split it into parts"):
original_file = generate_file(original_size)
file_parts = split_file(original_file, parts_count)
object_key = s3_helper.object_key_from_file_path(original_file)
with reporter.step("Create multipart and upload parts"):
upload_id = s3_client.create_multipart_upload(bucket, object_key)
for part_id, file_path in enumerate(file_parts, start=1):
etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
parts.append((part_id, etag))
with reporter.step("Check all parts are visible in bucket"):
got_parts = s3_client.list_parts(bucket, object_key, upload_id)
assert len(got_parts) == len(file_parts), f"Expected {parts_count} parts, got:\n{got_parts}"
with reporter.step("Complete multipart upload"):
s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
with reporter.step("Get multipart object via S3 gate"):
obj_s3 = s3_client.get_object(bucket, object_key)
with reporter.step("Get multipart object via HTTP gate"):
obj_http = get_via_http_gate(bucket, object_key, self.cluster.cluster_nodes[0])
with reporter.step("Make sure the hashes of both objects are the same"):
assert_hashes_are_equal(original_file, obj_http, obj_s3)
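PART_SIZE is 5 MiB because S3 requires every part of a multipart upload except the last to be at least 5 MiB; completing an upload with smaller parts is rejected (AWS reports EntityTooSmall). With five full-size parts the test object is therefore 25 MiB:

PART_SIZE = 5 * 1024 * 1024  # 5 MiB, the S3 minimum for all but the last part
assert PART_SIZE * 5 == 25 * 1024 * 1024  # 25 MiB original object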


@@ -3,7 +3,7 @@ import logging
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.http_gate import upload_via_http_gate_curl, verify_object_hash
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase


@@ -9,7 +9,7 @@ from frostfs_testlib import reporter
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.steps.cli.object import get_netmap_netinfo, get_object_from_random_node, head_object
from frostfs_testlib.steps.epoch import get_epoch, wait_for_epochs_align
from frostfs_testlib.steps.http_gate import (
    attr_into_str_header_curl,
    try_to_get_object_and_expect_error,
    upload_via_http_gate_curl,


@@ -1,10 +1,10 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.clients import S3ClientWrapper
from frostfs_testlib.resources.error_patterns import S3_BUCKET_DOES_NOT_ALLOW_ACL
from frostfs_testlib.resources.s3_acl_grants import PRIVATE_GRANTS, PUBLIC_READ_GRANTS, PUBLIC_READ_WRITE_GRANTS
from frostfs_testlib.steps import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import generate_file
@@ -33,32 +33,32 @@ class TestS3GateACL:
    def test_s3_create_bucket_with_ACL(self, s3_client: S3ClientWrapper):
        with reporter.step("Create bucket with ACL private"):
            bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="private")
            bucket_grants = s3_client.get_bucket_acl(bucket).get("Grants")
            s3_helper.verify_acl_permissions(bucket_grants, PRIVATE_GRANTS)

        with reporter.step("Create bucket with ACL public-read"):
            read_bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read")
            bucket_grants = s3_client.get_bucket_acl(read_bucket).get("Grants")
            s3_helper.verify_acl_permissions(bucket_grants, PUBLIC_READ_GRANTS)

        with reporter.step("Create bucket with ACL public-read-write"):
            public_rw_bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read-write")
            bucket_grants = s3_client.get_bucket_acl(public_rw_bucket).get("Grants")
            s3_helper.verify_acl_permissions(bucket_grants, PUBLIC_READ_WRITE_GRANTS)

    @allure.title("Bucket ACL (s3_client={s3_client})")
    def test_s3_bucket_ACL(self, s3_client: S3ClientWrapper):
        with reporter.step("Create bucket with public-read-write ACL"):
            bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read-write")
            bucket_grants = s3_client.get_bucket_acl(bucket).get("Grants")
            s3_helper.verify_acl_permissions(bucket_grants, PUBLIC_READ_WRITE_GRANTS)

        with reporter.step("Change bucket ACL to private"):
            s3_client.put_bucket_acl(bucket, acl="private")
            bucket_grants = s3_client.get_bucket_acl(bucket).get("Grants")
            s3_helper.verify_acl_permissions(bucket_grants, PRIVATE_GRANTS)

        with reporter.step("Change bucket ACL to public-read"):
            s3_client.put_bucket_acl(bucket, acl="public-read")
            bucket_grants = s3_client.get_bucket_acl(bucket).get("Grants")
            s3_helper.verify_acl_permissions(bucket_grants, PUBLIC_READ_GRANTS)
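The .get("Grants") extraction reflects the shape of the S3 GetBucketAcl response, which wraps the grant list alongside the bucket owner:

# GetBucketAcl response shape (per the AWS S3 API):
# {
#     "Owner": {"DisplayName": "...", "ID": "..."},
#     "Grants": [{"Grantee": {...}, "Permission": "FULL_CONTROL"}, ...],
# }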


@@ -4,8 +4,8 @@ from datetime import datetime, timedelta
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.clients.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils import string_utils
from frostfs_testlib.utils.file_utils import generate_file


@@ -4,8 +4,8 @@ from datetime import datetime, timedelta
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.clients import S3ClientWrapper
from frostfs_testlib.steps import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content


@@ -1,10 +1,9 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.clients.s3 import BucketContainerResolver, S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps import s3_helper
from frostfs_testlib.steps.cli.container import list_objects
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase


@@ -8,11 +8,12 @@ from typing import Literal
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.clients import AwsCliClient, S3ClientWrapper
from frostfs_testlib.clients.s3 import VersioningStatus
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS
from frostfs_testlib.resources.error_patterns import S3_BUCKET_DOES_NOT_ALLOW_ACL, S3_MALFORMED_XML_REQUEST
from frostfs_testlib.resources.s3_acl_grants import PRIVATE_GRANTS
from frostfs_testlib.steps import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils import wallet_utils


@@ -0,0 +1,623 @@
import random
import time
from datetime import datetime
from email.utils import formatdate
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.generic_cli import GenericCli
from frostfs_testlib.clients import Boto3ClientWrapper, S3ClientWrapper, S3HttpClient
from frostfs_testlib.clients.s3 import VersioningStatus
from frostfs_testlib.credentials.interfaces import CredentialsProvider, User
from frostfs_testlib.shell.interfaces import CommandOptions
from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.steps import s3_helper
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import TestFile, generate_file, get_file_hash, split_file
from frostfs_testlib.utils.string_utils import unique_name
from ....resources.common import S3_POLICY_FILE_LOCATION
FIVE_GIGABYTES = 5_368_709_120
PART_SIZE_FOR_MULTIPART = 5 * 1024 * 1024
@reporter.step("Allow patch for bucket")
def allow_patch_for_bucket(s3_client: S3ClientWrapper, bucket: str):
s3_client.put_bucket_policy(
bucket,
policy={
"Version": "2012-10-17",
"Id": "aaaa-bbbb-cccc-dddd",
"Statement": [
{
"Sid": "AddPerm",
"Effect": "Allow",
"Principal": "*",
"Action": ["s3:PatchObject"],
"Resource": [f"arn:aws:s3:::{bucket}/*"],
},
],
},
)
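The policy above grants s3:PatchObject on every key in the bucket to any principal. The bucket fixtures below apply it immediately after creation, roughly:

# Usage sketch, mirroring the bucket fixtures in this file:
# bucket = s3_client.create_bucket(location_constraint=placement_policy.value)
# allow_patch_for_bucket(s3_client, bucket)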
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" not in metafunc.fixturenames:
return
metafunc.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], ids=["s3policy"], indirect=True)
@pytest.fixture(scope="session", params=[pytest.param("rep3", marks=pytest.mark.rep), pytest.param("ec3.1", marks=pytest.mark.ec)])
def placement_policy(request: pytest.FixtureRequest) -> PlacementPolicy:
if request.param == "ec3.1":
return PlacementPolicy("ec3.1", "ec3.1")
return PlacementPolicy("rep3", "rep3")
@pytest.fixture(scope="session")
def versioning_status(request: pytest.FixtureRequest) -> VersioningStatus:
if "param" in request.__dict__:
return request.param
return VersioningStatus.UNDEFINED
@allure.title("[Class] Create bucket")
@pytest.fixture(scope="class")
def bucket(s3_client: S3ClientWrapper, versioning_status: VersioningStatus, placement_policy: PlacementPolicy) -> str:
with reporter.step(f"Create bucket with location constraint {placement_policy.value}"):
bucket = s3_client.create_bucket(location_constraint=placement_policy.value)
s3_helper.set_bucket_versioning(s3_client, bucket, versioning_status)
allow_patch_for_bucket(s3_client, bucket)
return bucket
@pytest.fixture(scope="function")
def original_object(s3_client: S3ClientWrapper, bucket: str, test_file: TestFile) -> str:
with reporter.step("Put object"):
key = s3_helper.object_key_from_file_path(test_file)
s3_client.put_object(bucket, test_file, key)
return key
@allure.title("[Session]: Create S3 client for another user")
@pytest.fixture(scope="session")
def another_s3_client(
users_pool: list[User],
s3_policy: str | None,
cluster: Cluster,
credentials_provider: CredentialsProvider,
s3_client: S3ClientWrapper,
) -> S3ClientWrapper:
user = users_pool[0]
node = cluster.cluster_nodes[0]
credentials_provider.S3.provide(user, node, s3_policy)
s3_client_cls = type(s3_client)
return s3_client_cls(user.s3_credentials.access_key, user.s3_credentials.secret_key, cluster.default_s3_gate_endpoint)
@allure.title("[Class] Create bucket under another user")
@pytest.fixture(scope="class")
def another_bucket(another_s3_client: S3ClientWrapper, versioning_status: VersioningStatus, placement_policy: PlacementPolicy) -> str:
with reporter.step(f"Create bucket with location constraint {placement_policy.value}"):
bucket = another_s3_client.create_bucket(location_constraint=placement_policy.value)
s3_helper.set_bucket_versioning(another_s3_client, bucket, versioning_status)
allow_patch_for_bucket(another_s3_client, bucket)
return bucket
@pytest.mark.nightly
@pytest.mark.s3_gate
class TestS3ObjectPatch(ClusterTestBase):
@allure.title("Patch simple object payload (range={patch_range}, s3_client={s3_client}, policy={placement_policy})")
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
@pytest.mark.parametrize(
"patch_range",
# String "object" denotes size of object.
["0:19", "500:550", "object/2-100:object/2+200", "object-1:object", "object:object", "object:object+123"],
)
def test_patch_simple_object_payload(
self,
s3_client: S3ClientWrapper,
s3_http_client: S3HttpClient,
bucket: str,
original_object: str,
object_size: ObjectSize,
patch_range: str,
):
start, end = s3_helper.get_range_relative_to_object(patch_range, object_size.value, int_values=True)
content_size = end - start + 1
content_range = f"bytes {start}-{end}/*"
with reporter.step("Generate payload object"):
content_file = generate_file(content_size)
with reporter.step("Patch simple object"):
s3_http_client.patch_object(bucket, original_object, content_file, content_range)
with reporter.step("Get patched part of object and make sure it has changed correctly"):
patched_file_part = s3_client.get_object(bucket, original_object, object_range=(start, end))
assert get_file_hash(patched_file_part) == get_file_hash(
content_file
), "Expected content hash did not match actual content hash"
@allure.title("Patch complex object payload (range={patch_range}, s3_client={s3_client}, policy={placement_policy})")
@pytest.mark.parametrize("object_size", ["complex"], indirect=True)
@pytest.mark.parametrize(
"patch_range",
# Strings "object" and "part" denote size of object and its part, respectively.
["part:part+100", "object-part:object", "0:part", "part*2:part*3", "part-1:part*2", "part+1:part*2-1"],
)
def test_patch_complex_object_payload(
self,
s3_client: S3ClientWrapper,
s3_http_client: S3HttpClient,
bucket: str,
original_object: str,
object_size: ObjectSize,
max_object_size: int,
patch_range: str,
):
start, end = s3_helper.get_range_relative_to_object(patch_range, object_size.value, max_object_size, int_values=True)
content_size = end - start + 1
content_range = f"bytes {start}-{end}/*"
with reporter.step("Generate payload object"):
content_file = generate_file(content_size)
with reporter.step("Patch complex object"):
s3_http_client.patch_object(bucket, original_object, content_file, content_range)
with reporter.step("Get patched part of object and make sure it has changed correctly"):
patched_file_part = s3_client.get_object(bucket, original_object, object_range=(start, end))
assert get_file_hash(patched_file_part) == get_file_hash(
content_file
), "Expected content hash did not match actual content hash"
@allure.title(
"Patch object with fulfilled If-Match condition (s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
)
def test_patch_with_fulfilled_if_match_condition(
self,
s3_client: S3ClientWrapper,
s3_http_client: S3HttpClient,
bucket: str,
original_object: str,
):
start, end = 100, 199
content_size = end - start + 1
content_range = f"bytes {start}-{end}/*"
with reporter.step("Generate payload object"):
content_file = generate_file(content_size)
expected_hash = get_file_hash(content_file)
with reporter.step("Get object ETag attribute"):
object_info = s3_client.head_object(bucket, original_object)
etag = object_info["ETag"]
with reporter.step("Patch object with If-Match header"):
s3_http_client.patch_object(bucket, original_object, content_file, content_range, if_match=etag)
with reporter.step("Get patched object and make sure it has changed correctly"):
patched_file = s3_client.get_object(bucket, original_object)
patched_hash = get_file_hash(patched_file, offset=start, len=content_size)
assert patched_hash == expected_hash, "Expected content hash did not match actual content hash"
@allure.title(
"[NEGATIVE] Patch cannot be applied with failed If-Match condition "
"(s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
)
def test_patch_with_failed_if_match_condition(self, s3_http_client: S3HttpClient, bucket: str, original_object: str):
with reporter.step("Try patch object with If-Match header and get exception"):
with pytest.raises(Exception, match="PreconditionFailed"):
s3_http_client.patch_object(bucket, original_object, "content", "bytes 0-6/*", if_match="nonexistentetag")
@allure.title(
"Patch object with fulfilled If-Unmodified-Since condition "
"(s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
)
def test_patch_with_fulfilled_if_unmodified_since_condition(
self,
s3_client: S3ClientWrapper,
s3_http_client: S3HttpClient,
bucket: str,
original_object: str,
):
start, end = 235, 341
content_size = end - start + 1
content_range = f"bytes {start}-{end}/*"
with reporter.step("Generate payload object"):
content_file = generate_file(content_size)
expected_hash = get_file_hash(content_file)
with reporter.step("Get object LastModified attribute"):
response = s3_client.head_object(bucket, original_object)
if isinstance(response["LastModified"], str):
response["LastModified"] = datetime.fromisoformat(response["LastModified"])
# Convert datetime to RFC 7232 format
last_modified = formatdate(response["LastModified"].timestamp(), localtime=False, usegmt=True)
with reporter.step("Patch object with If-Unmodified-Since header"):
s3_http_client.patch_object(bucket, original_object, content_file, content_range, if_unmodified_since=last_modified)
with reporter.step("Get patched object and make sure it has changed correctly"):
patched_file = s3_client.get_object(bucket, original_object)
patched_hash = get_file_hash(patched_file, offset=start, len=content_size)
assert patched_hash == expected_hash, "Expected content hash did not match actual content hash"
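formatdate(..., usegmt=True) renders the HTTP-date format that If-Unmodified-Since validators expect. A standalone example:

from datetime import datetime, timezone
from email.utils import formatdate

# Prints "Sun, 06 Nov 1994 08:49:37 GMT" - the RFC 7232 HTTP-date format.
dt = datetime(1994, 11, 6, 8, 49, 37, tzinfo=timezone.utc)
print(formatdate(dt.timestamp(), localtime=False, usegmt=True))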
@allure.title(
"[NEGATIVE] Patch cannot be applied with failed If-Unmodified-Since condition "
"(s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
)
def test_patch_with_failed_if_unmodified_since_condition(
self,
s3_client: S3ClientWrapper,
s3_http_client: S3HttpClient,
bucket: str,
original_object: str,
test_file: TestFile,
):
with reporter.step("Get original object LastModified attribute"):
response = s3_client.head_object(bucket, original_object)
if isinstance(response["LastModified"], str):
response["LastModified"] = datetime.fromisoformat(response["LastModified"])
# Convert datetime to RFC 7232 format
previous_last_modified = formatdate(response["LastModified"].timestamp(), localtime=False, usegmt=True)
with reporter.step("Wait two seconds for LastModified to update"):
# A subsequent PUT can land in the same second the object was originally uploaded,
# in which case the LastModified attribute would appear unchanged after the operation.
time.sleep(2)
with reporter.step("Put new data for existing object"):
s3_client.put_object(bucket, test_file, original_object)
with reporter.step("Get object LastModified attribute with new data and make sure it has changed"):
response = s3_client.head_object(bucket, original_object)
if isinstance(response["LastModified"], str):
response["LastModified"] = datetime.fromisoformat(response["LastModified"])
# Convert datetime to RFC 7232 format
last_modified = formatdate(response["LastModified"].timestamp(), localtime=False, usegmt=True)
assert last_modified != previous_last_modified, f"Attribute LastModified was expected to change: {last_modified}"
with reporter.step("Try patch object with If-Unmodified-Since header and get exception"):
with pytest.raises(Exception, match="PreconditionFailed"):
s3_http_client.patch_object(bucket, original_object, b"modify", "bytes 0-5/*", if_unmodified_since=previous_last_modified)
@allure.title(
"Patch object with fulfilled x-amz-expected-bucket-owner condition "
"(s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
)
def test_patch_with_fulfilled_if_expected_bucket_owner_condition(
self,
s3_client: S3ClientWrapper,
s3_http_client: S3HttpClient,
bucket: str,
original_object: str,
):
start, end = 512, 749
content_size = end - start + 1
content_range = f"bytes {start}-{end}/*"
with reporter.step("Generate payload object"):
content_file = generate_file(content_size)
expected_hash = get_file_hash(content_file)
with reporter.step("Get bucket owner ID"):
bucket_acl = s3_client.get_bucket_acl(bucket)
expected_bucket_owner = bucket_acl["Owner"]["DisplayName"]
with reporter.step("Patch object with x-amz-expected-bucket-owner header"):
s3_http_client.patch_object(
bucket,
original_object,
content_file,
content_range,
x_amz_expected_bucket_owner=expected_bucket_owner,
)
with reporter.step("Get patched object and make sure it has changed correctly"):
patched_file = s3_client.get_object(bucket, original_object)
patched_hash = get_file_hash(patched_file, offset=start, len=content_size)
assert patched_hash == expected_hash, "Expected content hash did not match actual content hash"
@allure.title(
"[NEGATIVE] Patch cannot be applied with non-existent bucket owner ID in x-amz-expected-bucket-owner header "
"(s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
)
def test_patch_with_non_existent_bucket_owner_id(
self,
s3_client: S3ClientWrapper,
s3_http_client: S3HttpClient,
bucket: str,
original_object: str,
):
with reporter.step("Get bucket owner ID"):
bucket_acl = s3_client.get_bucket_acl(bucket)
bucket_owner = bucket_acl["Owner"]["DisplayName"]
with reporter.step("Change owner ID to non-existent"):
unexpected_bucket_owner = list(bucket_owner)
random.shuffle(unexpected_bucket_owner)
unexpected_bucket_owner = "".join(unexpected_bucket_owner)
with reporter.step("Try patch object with x-amz-expected-bucket-owner header and get exception"):
with pytest.raises(Exception, match="AccessDenied"):
s3_http_client.patch_object(
bucket,
original_object,
b"blablabla",
"bytes 10-18/*",
x_amz_expected_bucket_owner=unexpected_bucket_owner,
)
# In some cases AwsCliClient is not configured correctly, so both buckets
# end up created by the same user, which causes an error.
@allure.title(
"[NEGATIVE] Patch cannot be applied with another bucket owner ID in x-amz-expected-bucket-owner header "
"(s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
)
@pytest.mark.parametrize("s3_client", [Boto3ClientWrapper], indirect=True)
def test_patch_with_another_bucket_owner_id(
self,
s3_client: S3ClientWrapper,
s3_http_client: S3HttpClient,
bucket: str,
original_object: str,
another_bucket: str,
):
with reporter.step("Get owner ID of another bucket"):
bucket_acl = s3_client.get_bucket_acl(another_bucket)
another_bucket_owner = bucket_acl["Owner"]["DisplayName"]
with reporter.step("Try patch object with x-amz-expected-bucket-owner header and get exception"):
with pytest.raises(Exception, match="AccessDenied"):
s3_http_client.patch_object(
bucket,
original_object,
b"blablabla",
"bytes 10-18/*",
x_amz_expected_bucket_owner=another_bucket_owner,
)
@allure.title(
"[NEGATIVE] Patch cannot be applied with invalid Content-Range header "
"(range={patch_range}, s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
)
@pytest.mark.parametrize(
"patch_range",
# String "object" denotes size of object.
["object+100:200", "object+10:object+16", "-1:1", "20:100", "0:2", f"0:{FIVE_GIGABYTES}", "0:0"],
)
def test_patch_with_invalid_content_range(
self,
s3_http_client: S3HttpClient,
bucket: str,
original_object: str,
object_size: ObjectSize,
patch_range: str,
):
content_range = s3_helper.get_range_relative_to_object(patch_range, object_size.value)
with reporter.step("Try patch object with invalid Content-Range header and get exception"):
with pytest.raises(Exception, match="InvalidRange"):
s3_http_client.patch_object(bucket, original_object, b"content", content_range)
@allure.title(
"[NEGATIVE] Patch cannot be applied without Content-Range header "
"(s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
)
def test_patch_without_content_range(self, s3_http_client: S3HttpClient, bucket: str, original_object: str):
with reporter.step("Try patch object without Content-Range header and get exception"):
with pytest.raises(Exception, match="MissingContentRange"):
s3_http_client.patch_object(bucket, original_object, b"content", None)
@allure.title(
"[NEGATIVE] Patch cannot be applied without Content-Length header "
"(s3_client={s3_client}, object_size={object_size}, policy={placement_policy})"
)
def test_patch_without_content_length(
self,
s3_http_client: S3HttpClient,
bucket: str,
original_object: str,
node_under_test: ClusterNode,
):
with reporter.step("Generate headers that comply with AWS specification"):
data = "content"
url = f"{self.cluster.default_s3_gate_endpoint}/{bucket}/{original_object}"
host = self.cluster.default_s3_gate_endpoint[8:]  # strip the "https://" scheme prefix
headers = {"Host": host, "Url": url, "Content-Range": "bytes 0-6/*"}
headers = dict(s3_http_client._create_aws_request("PATCH", url, headers, data).headers)
headers.pop("Content-Length", None)
with reporter.step("Try patch object without Content-Length header and get exception"):
curl = GenericCli("curl", node_under_test.host)
request = f" {url} -X PATCH"
for header, value in headers.items():
request += f" -H '{header}: {value}'"
# Remove the Content-Length header:
# passing a header with an empty value tells curl to omit it from the request.
request += " -H 'Content-Length:'"
request += f" -d '{data}' -k"
response = curl(request, shell=LocalShell(), options=CommandOptions(check=False))
assert "MissingContentLength" in response.stdout, response.stdout
@allure.title("[NEGATIVE] Patch cannot be applied to non-existent bucket")
def test_patch_non_existent_bucket(self, s3_http_client: S3HttpClient):
with reporter.step("Try patch object in non-existent bucket and get exception"):
with pytest.raises(Exception, match="NoSuchBucket"):
s3_http_client.patch_object("fake-bucket", unique_name("object-"), b"content", "bytes 0-6/*")
@allure.title("[NEGATIVE] Patch cannot be applied to non-existent object (s3_client={s3_client}, policy={placement_policy})")
def test_patch_non_existent_object(self, s3_http_client: S3HttpClient, bucket: str):
with reporter.step("Try patch non-existent object and get exception"):
with pytest.raises(Exception, match="NoSuchKey"):
s3_http_client.patch_object(bucket, "fake-object", b"content", "bytes 0-6/*")
@allure.title("Patch object in versioned bucket (s3_client={s3_client}, object_size={object_size}, policy={placement_policy})")
@pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
def test_patch_object_in_versioned_bucket(
self,
s3_client: S3ClientWrapper,
s3_http_client: S3HttpClient,
bucket: str,
object_size: ObjectSize,
):
patch_ranges = ["0:35", "40:49", "object-100:object", "object:object+231"]
with reporter.step("Generate original object"):
original_file = generate_file(object_size.value)
original_key = s3_helper.object_key_from_file_path(original_file)
with reporter.step("Put object"):
version = s3_client.put_object(bucket, original_file, original_key)
expected_versions = {version}
with reporter.step("Patch versioned object"):
for rng in patch_ranges:
start, end = s3_helper.get_range_relative_to_object(rng, object_size=object_size.value, int_values=True)
content_size = end - start + 1
content_range = f"bytes {start}-{end}/*"
with reporter.step(f"Generate payload object of {content_size} bytes"):
content_file = generate_file(content_size)
with reporter.step(f"Patch object and get new version"):
response = s3_http_client.patch_object(bucket, original_key, content_file, content_range, version_id=version)
version = response["VersionId"]
expected_versions.add(version)
with reporter.step(f"Get patched part of object and make sure it has changed correctly"):
got_part = s3_client.get_object(bucket, original_key, version_id=version, object_range=(start, end))
assert get_file_hash(got_part) == get_file_hash(content_file), "Expected content hash did not match actual content hash"
with reporter.step("Check that all expected versions are in bucket"):
got_versions = {
version.get("VersionId") for version in s3_client.list_objects_versions(bucket) if version.get("Key") == original_key
}
assert expected_versions == got_versions, f"Expected versions of object are missing from bucket: {expected_versions}"
@allure.title("Patch multipart object (range={patch_range}, s3_client={s3_client}, policy={placement_policy})")
@pytest.mark.parametrize("patch_range", ["0:part-1", "part:part*2-1", "part-100:part*2+200", "object-part-1:object"])
def test_s3_patch_multipart_object(
self,
s3_client: S3ClientWrapper,
s3_http_client: S3HttpClient,
bucket: str,
patch_range: str,
):
parts_count = 5
parts = []
original_size = PART_SIZE_FOR_MULTIPART * parts_count
with reporter.step("Generate original object and split it into parts"):
original_file = generate_file(original_size)
file_parts = split_file(original_file, parts_count)
object_key = s3_helper.object_key_from_file_path(original_file)
start, end = s3_helper.get_range_relative_to_object(
patch_range, object_size=original_size, part_size=PART_SIZE_FOR_MULTIPART, int_values=True
)
content_size = end - start + 1
content_range = f"bytes {start}-{end}/*"
with reporter.step("Generate payload object"):
content_file = generate_file(content_size)
with reporter.step("Create multipart and upload parts"):
upload_id = s3_client.create_multipart_upload(bucket, object_key)
for part_id, file_path in enumerate(file_parts, start=1):
etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
parts.append((part_id, etag))
with reporter.step("Check all parts are visible in bucket"):
got_parts = s3_client.list_parts(bucket, object_key, upload_id)
assert len(got_parts) == len(file_parts), f"Expected {parts_count} parts, got:\n{got_parts}"
with reporter.step("Complete multipart upload"):
s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
with reporter.step("Patch multipart object"):
s3_http_client.patch_object(bucket, object_key, content_file, content_range, timeout=200)
with reporter.step("Get patched part of object and make sure it has changed correctly"):
got_part = s3_client.get_object(bucket, object_key, object_range=(start, end))
assert get_file_hash(got_part) == get_file_hash(content_file), "Expected content hash did not match actual content hash"
@allure.title("Patch multipart object in versioned bucket (s3_client={s3_client}, policy={placement_policy})")
@pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
def test_s3_patch_multipart_object_in_versioned_bucket(
self,
s3_client: S3ClientWrapper,
s3_http_client: S3HttpClient,
bucket: str,
):
parts = []
parts_count = 5
original_size = PART_SIZE_FOR_MULTIPART * parts_count
patch_ranges = ["0:part-1", "part:part*2-1", "part-100:part*2+200", "object-part-1:object"]
with reporter.step("Generate original object and split it into parts"):
original_file = generate_file(original_size)
original_key = s3_helper.object_key_from_file_path(original_file)
file_parts = split_file(original_file, parts_count)
with reporter.step("Create multipart and upload parts"):
upload_id = s3_client.create_multipart_upload(bucket, original_key)
for part_id, file_path in enumerate(file_parts, start=1):
etag = s3_client.upload_part(bucket, original_key, upload_id, part_id, file_path)
parts.append((part_id, etag))
with reporter.step("Check all parts are visible in bucket"):
got_parts = s3_client.list_parts(bucket, original_key, upload_id)
assert len(got_parts) == len(file_parts), f"Expected {parts_count} parts, got:\n{got_parts}"
with reporter.step("Complete multipart upload"):
response = s3_client.complete_multipart_upload(bucket, original_key, upload_id, parts)
version = response["VersionId"]
expected_versions = {version}
with reporter.step("Patch versioned multipart object"):
for rng in patch_ranges:
start, end = s3_helper.get_range_relative_to_object(
rng, object_size=original_size, part_size=PART_SIZE_FOR_MULTIPART, int_values=True
)
content_size = end - start + 1
content_range = f"bytes {start}-{end}/*"
with reporter.step("Generate payload object"):
content_file = generate_file(content_size)
with reporter.step("Patch multipart object and get new version"):
response = s3_http_client.patch_object(
bucket, original_key, content_file, content_range, version_id=version, timeout=200
)
version = response["VersionId"]
expected_versions.add(version)
with reporter.step("Get patched part of object and make sure it has changed correctly"):
got_part = s3_client.get_object(bucket, original_key, version_id=version, object_range=(start, end))
assert get_file_hash(got_part) == get_file_hash(content_file), "Expected content hash did not match actual content hash"
with reporter.step("Check that all expected versions are in bucket"):
got_versions = {
version.get("VersionId") for version in s3_client.list_objects_versions(bucket) if version.get("Key") == original_key
}
assert expected_versions == got_versions, f"Expected versions of object are missing from bucket: {expected_versions}"
# TODO: Negative scenario for SSE objects is postponed for now.


@@ -4,9 +4,8 @@ import allure
import pytest
from botocore.exceptions import ClientError
from frostfs_testlib import reporter
from frostfs_testlib.clients.s3 import BucketContainerResolver, S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps import s3_helper
from frostfs_testlib.steps.storage_policy import get_simple_object_copies
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo

View file

@@ -5,8 +5,8 @@ from typing import Tuple
 import allure
 import pytest
 from frostfs_testlib import reporter
-from frostfs_testlib.s3 import S3ClientWrapper
-from frostfs_testlib.steps.s3 import s3_helper
+from frostfs_testlib.clients import S3ClientWrapper
+from frostfs_testlib.steps import s3_helper
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.utils.file_utils import generate_file

View file

@@ -3,8 +3,8 @@ import os
 import allure
 import pytest
 from frostfs_testlib import reporter
-from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
-from frostfs_testlib.steps.s3 import s3_helper
+from frostfs_testlib.clients.s3 import S3ClientWrapper, VersioningStatus
+from frostfs_testlib.steps import s3_helper
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content, get_file_content
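The three hunks above apply the same rename: imports from frostfs_testlib.s3 and frostfs_testlib.steps.s3 now live under frostfs_testlib.clients and frostfs_testlib.steps. Taken together, the post-move import surface used by these suites looks like this (names exactly as they appear on the new side of the diffs):

from frostfs_testlib.clients import S3ClientWrapper
from frostfs_testlib.clients.s3 import BucketContainerResolver, VersioningStatus
from frostfs_testlib.steps import s3_helper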

View file

@@ -18,6 +18,7 @@ def _check_version_format(version):
 @allure.title("Check binaries versions")
+@pytest.mark.nightly
 @pytest.mark.check_binaries
 def test_binaries_versions(hosting: Hosting):
     """

View file

@@ -7,8 +7,8 @@ from frostfs_testlib.utils import string_utils
 @pytest.fixture(scope="module")
-def owner_wallet(default_wallet: WalletInfo) -> WalletInfo:
-    return default_wallet
+def owner_wallet(wallet: WalletInfo) -> WalletInfo:
+    return wallet
 
 
 @pytest.fixture(scope="module")

View file

@@ -3,8 +3,8 @@ import random
 import allure
 import pytest
 from frostfs_testlib import reporter
+from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
 from frostfs_testlib.resources.error_patterns import SESSION_NOT_FOUND
-from frostfs_testlib.steps.cli.container import create_container
 from frostfs_testlib.steps.cli.object import delete_object, put_object, put_object_to_random_node
 from frostfs_testlib.steps.session_token import create_session_token
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
@@ -12,13 +12,17 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils.file_utils import generate_file
 
+from ...helpers.container_creation import create_container_with_ape
+from ...helpers.container_request import APE_OWNER_ALLOW_ALL, ContainerRequest
+
 
 @pytest.mark.nightly
 @pytest.mark.sanity
 @pytest.mark.session_token
 class TestDynamicObjectSession(ClusterTestBase):
     @allure.title("Object Operations with Session Token (obj_size={object_size})")
-    def test_object_session_token(self, default_wallet: WalletInfo, object_size: ObjectSize):
+    @pytest.mark.parametrize("user_tag", ["TestDynamicObjectSession"], indirect=True)  # provide dedicated user with no APE side-policies
+    def test_object_session_token(self, wallet: WalletInfo, frostfs_cli: FrostfsCli, object_size: ObjectSize):
         """
         Test how operations over objects are executed with a session token
@@ -32,17 +36,14 @@ class TestDynamicObjectSession(ClusterTestBase):
         with a session token
         """
-        with reporter.step("Init wallet"):
-            wallet = default_wallet
-
         with reporter.step("Nodes Settlements"):
             session_token_node, container_node, non_container_node = random.sample(self.cluster.storage_nodes, 3)
 
         with reporter.step("Create Session Token"):
             session_token = create_session_token(
                 shell=self.shell,
-                owner=default_wallet.get_address(),
-                wallet=default_wallet,
+                owner=wallet.get_address(),
+                wallet=wallet,
                 rpc_endpoint=session_token_node.get_rpc_endpoint(),
             )
@@ -54,11 +55,13 @@ class TestDynamicObjectSession(ClusterTestBase):
                 f'AS LOC_{locode}_PLACE FILTER "UN-LOCODE" '
                 f'EQ "{un_locode}" AS LOC_{locode}'
             )
-            cid = create_container(
+            cid = create_container_with_ape(
+                ContainerRequest(placement_policy, APE_OWNER_ALLOW_ALL),
+                frostfs_cli,
                 wallet,
-                shell=self.shell,
-                endpoint=self.cluster.default_rpc_endpoint,
-                rule=placement_policy,
+                self.shell,
+                self.cluster,
+                self.cluster.default_rpc_endpoint,
             )
 
         with reporter.step("Put Objects"):

View file

@@ -3,9 +3,9 @@ import logging
 import allure
 import pytest
 from frostfs_testlib import reporter
+from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
 from frostfs_testlib.resources.error_patterns import EXPIRED_SESSION_TOKEN, MALFORMED_REQUEST, OBJECT_ACCESS_DENIED, OBJECT_NOT_FOUND
 from frostfs_testlib.shell import Shell
-from frostfs_testlib.steps.cli.container import create_container
 from frostfs_testlib.steps.cli.object import (
     delete_object,
     get_object,
@@ -38,16 +38,24 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.test_control import expect_not_raises
 from frostfs_testlib.utils.file_utils import generate_file
 
+from ...helpers.container_creation import create_containers_with_ape
+from ...helpers.container_request import OWNER_ALLOW_ALL, MultipleContainersRequest
+
 logger = logging.getLogger("NeoLogger")
 
 RANGE_OFFSET_FOR_COMPLEX_OBJECT = 200
 
 
 @pytest.fixture(scope="module")
-def storage_containers(owner_wallet: WalletInfo, client_shell: Shell, cluster: Cluster) -> list[str]:
-    cid = create_container(owner_wallet, shell=client_shell, endpoint=cluster.default_rpc_endpoint)
-    other_cid = create_container(owner_wallet, shell=client_shell, endpoint=cluster.default_rpc_endpoint)
-    yield [cid, other_cid]
+def storage_containers(owner_wallet: WalletInfo, frostfs_cli: FrostfsCli, client_shell: Shell, cluster: Cluster) -> list[str]:
+    return create_containers_with_ape(
+        frostfs_cli,
+        owner_wallet,
+        client_shell,
+        cluster,
+        cluster.default_rpc_endpoint,
+        MultipleContainersRequest([OWNER_ALLOW_ALL, OWNER_ALLOW_ALL]),
+    )
 
 
 @pytest.fixture(
@@ -135,6 +143,7 @@ def static_sessions(
 @pytest.mark.nightly
 @pytest.mark.static_session
+@pytest.mark.parametrize("user_tag", ["TestObjectStaticSession"], indirect=True)  # provide dedicated user with no APE side-policies
 class TestObjectStaticSession(ClusterTestBase):
     @allure.title("Read operations with static session (method={method_under_test.__name__}, obj_size={object_size})")
     @pytest.mark.parametrize(
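Both session suites now pin user_tag through indirect parametrization, which, per the inline comment, gives each suite a dedicated user so that APE side-policies from other tests cannot leak in. A minimal sketch of how an indirect fixture consumes such a value (illustrative only; the project's actual user_tag fixture is not shown in this diff):

import pytest

@pytest.fixture(scope="module")
def user_tag(request: pytest.FixtureRequest) -> str:
    # With indirect=True the parametrize value arrives via request.param,
    # so downstream user-provisioning fixtures can key off the suite name.
    return request.param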