Compare commits

No commits in common. "master" and "master" have entirely different histories.

43 changed files with 1667 additions and 2790 deletions

View file

@@ -10,7 +10,7 @@ repos:
- id: isort
name: isort (python)
- repo: https://git.frostfs.info/TrueCloudLab/allure-validator
rev: 1.1.1
rev: 1.1.0
hooks:
- id: allure-validator
args: [

View file

@@ -1,3 +0,0 @@
import os
TESTS_BASE_PATH = os.path.dirname(os.path.relpath(__file__))

View file

@@ -6,7 +6,7 @@ from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from ..helpers.object_access import (
from pytest_tests.helpers.object_access import (
can_delete_object,
can_get_head_object,
can_get_object,

View file

@@ -1,56 +0,0 @@
import json
import time
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell.interfaces import Shell
from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.utils import datetime_utils
from .container_request import ContainerRequest
def create_container_with_ape(
frostfs_cli: FrostfsCli, wallet: WalletInfo, shell: Shell, cluster: Cluster, endpoint: str, container_request: ContainerRequest
):
with reporter.step("Create container"):
cid = _create_container_by_spec(wallet, shell, cluster, endpoint, container_request)
with reporter.step("Apply APE rules for container"):
if container_request.ape_rules:
_apply_ape_rules(frostfs_cli, endpoint, cid, container_request.ape_rules)
with reporter.step("Wait for one block"):
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
with reporter.step("Search nodes holding the container"):
container_holder_nodes = search_nodes_with_container(wallet, cid, shell, cluster.default_rpc_endpoint, cluster)
report_data = {node.id: node.host_ip for node in container_holder_nodes}
reporter.attach(json.dumps(report_data, indent=2), "container_nodes.json")
return cid
@reporter.step("Create container by spec {container_request}")
def _create_container_by_spec(
wallet: WalletInfo, shell: Shell, cluster: Cluster, endpoint: str, container_request: ContainerRequest
) -> str:
return create_container(wallet, shell, endpoint, container_request.parsed_rule(cluster), wait_for_creation=False)
def _apply_ape_rules(frostfs_cli: FrostfsCli, endpoint: str, cid: str, ape_rules: list[ape.Rule]):
for ape_rule in ape_rules:
rule_str = ape_rule.as_string()
with reporter.step(f"Apply APE rule '{rule_str}' for container {cid}"):
frostfs_cli.ape_manager.add(
endpoint,
ape_rule.chain_id,
target_name=cid,
target_type="container",
rule=rule_str,
)
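
For reference, a minimal sketch of how the removed create_container_with_ape helper was invoked; the frostfs_cli, wallet, shell, cluster and endpoint names are assumed conftest fixtures, and the ContainerRequest preset comes from the removed helper file shown next.

from ..helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest

# Hypothetical call site; frostfs_cli, wallet, shell, cluster and endpoint are assumed fixtures.
request = ContainerRequest(
    policy="REP 2 IN X CBF 1 SELECT 2 FROM * AS X",
    ape_rules=APE_EVERYONE_ALLOW_ALL,
    short_name="public_rep_2",
)
cid = create_container_with_ape(frostfs_cli, wallet, shell, cluster, endpoint, request)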

View file

@@ -1,60 +0,0 @@
from dataclasses import dataclass
from functools import partial
from frostfs_testlib.steps.cli.container import DEFAULT_PLACEMENT_RULE
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses import ape
APE_EVERYONE_ALLOW_ALL = [ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL)]
# In case we need container operations
# ape.Rule(ape.Verb.ALLOW, ape.ContainerOperations.WILDCARD_ALL)]
APE_OWNER_ALLOW_ALL = [ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL, ape.Condition.by_role(ape.Role.OWNER))]
# In case we need container operations
# ape.Rule(ape.Verb.ALLOW, ape.ContainerOperations.WILDCARD_ALL, ape.Condition.by_role(ape.Role.OWNER))]
@dataclass
class ContainerRequest:
policy: str = None
ape_rules: list[ape.Rule] = None
short_name: str | None = None
def __post_init__(self):
if self.ape_rules is None:
self.ape_rules = []
# For pytest instead of ids=[...] everywhere
self.__name__ = self.short_name
def parsed_rule(self, cluster: Cluster):
if self.policy is None:
return None
substitutions = {"%NODE_COUNT%": str(len(cluster.cluster_nodes))}
parsed_rule = self.policy
for sub, replacement in substitutions.items():
parsed_rule = parsed_rule.replace(sub, replacement)
return parsed_rule
def __repr__(self):
if self.short_name:
return self.short_name
spec_info: list[str] = []
if self.policy:
spec_info.append(f"policy='{self.policy}'")
if self.ape_rules:
ape_rules_list = ", ".join([f"'{rule.as_string()}'" for rule in self.ape_rules])
spec_info.append(f"ape_rules=[{ape_rules_list}]")
return f"({', '.join(spec_info)})"
PUBLIC_WITH_POLICY = partial(ContainerRequest, ape_rules=APE_EVERYONE_ALLOW_ALL, short_name="Custom_policy_with_allow_all_ape_rule")
EVERYONE_ALLOW_ALL = ContainerRequest(policy=DEFAULT_PLACEMENT_RULE, ape_rules=APE_EVERYONE_ALLOW_ALL, short_name="Everyone_Allow_All")
OWNER_ALLOW_ALL = ContainerRequest(policy=DEFAULT_PLACEMENT_RULE, ape_rules=APE_OWNER_ALLOW_ALL, short_name="Owner_Allow_All")
PRIVATE = ContainerRequest(policy=DEFAULT_PLACEMENT_RULE, ape_rules=[], short_name="Private_No_APE")
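
A minimal usage sketch for these presets; the cluster object is an assumed fixture and the node count is only an example.

# Hypothetical usage; PUBLIC_WITH_POLICY only fixes ape_rules and short_name,
# the placement policy string is supplied per test.
request = PUBLIC_WITH_POLICY("REP %NODE_COUNT% IN X CBF 1 SELECT %NODE_COUNT% FROM * AS X")
# parsed_rule() substitutes %NODE_COUNT% with the cluster size, e.g. on a 4-node cluster
# it yields "REP 4 IN X CBF 1 SELECT 4 FROM * AS X".
policy = request.parsed_rule(cluster)
# short_name doubles as __repr__ (and as the pytest test id via __name__):
assert repr(request) == "Custom_policy_with_allow_all_ape_rule"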

View file

@@ -0,0 +1,23 @@
from dataclasses import dataclass
from frostfs_testlib.steps.cli.container import DEFAULT_PLACEMENT_RULE
from frostfs_testlib.storage.cluster import Cluster
@dataclass
class ContainerSpec:
rule: str = DEFAULT_PLACEMENT_RULE
basic_acl: str = None
allow_owner_via_ape: bool = False
def parsed_rule(self, cluster: Cluster):
if self.rule is None:
return None
substitutions = {"%NODE_COUNT%": str(len(cluster.cluster_nodes))}
parsed_rule = self.rule
for sub, replacement in substitutions.items():
parsed_rule = parsed_rule.replace(sub, replacement)
return parsed_rule
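
ContainerSpec is consumed through the pytest container marker by the new container fixture added to the ACL conftest later in this comparison; a minimal usage sketch, assuming the PUBLIC_ACL_F constant used in the basic ACL tests below.

import pytest
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL_F

# Hypothetical test: the `container` fixture reads this marker via _get_container_spec
# and creates a container with the requested basic ACL and placement rule.
@pytest.mark.container(ContainerSpec(basic_acl=PUBLIC_ACL_F))
def test_with_public_container(container: str):
    assert container  # `container` resolves to the CID returned by the fixture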

View file

@@ -1,26 +0,0 @@
from frostfs_testlib.shell.interfaces import Shell
from frostfs_testlib.steps.cli.container import get_container
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetmapInfo
from workspace.frostfs_testcases.pytest_tests.helpers.utility import placement_policy_from_container
def validate_object_policy(wallet: str, shell: Shell, placement_rule: str, cid: str, endpoint: str):
got_policy = placement_policy_from_container(get_container(wallet, cid, shell, endpoint, False))
assert got_policy.replace("'", "") == placement_rule.replace(
"'", ""
), f"Expected \n{placement_rule} and got policy \n{got_policy} are the same"
def get_netmap_param(netmap_info: list[NodeNetmapInfo]) -> dict:
dict_external = dict()
for node in netmap_info:
external_address = node.external_address[0].split("/")[2]
dict_external[external_address] = {
"country": node.country,
"country_code": node.country_code,
"Price": node.price,
"continent": node.continent,
"un_locode": node.un_locode,
"location": node.location,
}
return dict_external
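
These removed helpers were typically chained with the netmap snapshot utilities, as in the removed policy-price tests further down; a minimal sketch, where default_wallet, cid and rpc_endpoint are assumed fixtures and cluster/shell come from ClusterTestBase.

from frostfs_testlib.steps.node_management import get_netmap_snapshot
from frostfs_testlib.utils.cli_utils import parse_netmap_output

# Hypothetical chaining of the removed helpers inside a ClusterTestBase test.
netmap = parse_netmap_output(get_netmap_snapshot(node=self.cluster.storage_nodes[0], shell=self.shell))
netmap = get_netmap_param(netmap)  # {"<node address>": {"Price": ..., "country": ..., ...}, ...}
validate_object_policy(default_wallet, self.shell, "REP 2 IN X CBF 1 SELECT 2 FROM * AS X", cid, rpc_endpoint)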

View file

@@ -1,8 +1,5 @@
import os
from .. import TESTS_BASE_PATH
TEST_CYCLES_COUNT = int(os.getenv("TEST_CYCLES_COUNT", "1"))
DEVENV_PATH = os.getenv("DEVENV_PATH", os.path.join("..", "frostfs-dev-env"))
S3_POLICY_FILE_LOCATION = os.path.join(TESTS_BASE_PATH, "resources/files/policy.json")

View file

@@ -0,0 +1,106 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.wellknown_acl import PRIVATE_ACL_F, PUBLIC_ACL_F, READONLY_ACL_F
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from pytest_tests.helpers.container_access import assert_full_access_to_container, assert_no_access_to_container, assert_read_only_container
from ....helpers.container_spec import ContainerSpec
@pytest.mark.nightly
@pytest.mark.sanity
@pytest.mark.acl
class TestACLBasic(ClusterTestBase):
@allure.title("Operations in public container available to everyone (obj_size={object_size})")
@pytest.mark.container(ContainerSpec(basic_acl=PUBLIC_ACL_F))
def test_basic_acl_public(
self,
default_wallet: WalletInfo,
other_wallet: WalletInfo,
client_shell: Shell,
container: str,
file_path: str,
cluster: Cluster,
):
"""
Test access to object operations in public container.
"""
for wallet, role in ((default_wallet, "owner"), (other_wallet, "others")):
with reporter.step("Put objects to container"):
# We create new objects for each wallet because assert_full_access_to_container
# deletes the object
owner_object_oid = put_object_to_random_node(
default_wallet,
file_path,
container,
shell=self.shell,
cluster=self.cluster,
attributes={"created": "owner"},
)
other_object_oid = put_object_to_random_node(
other_wallet,
file_path,
container,
shell=self.shell,
cluster=self.cluster,
attributes={"created": "other"},
)
with reporter.step(f"Check {role} has full access to public container"):
assert_full_access_to_container(wallet, container, owner_object_oid, file_path, client_shell, cluster)
assert_full_access_to_container(wallet, container, other_object_oid, file_path, client_shell, cluster)
@allure.title("Operations in private container only available to owner (obj_size={object_size})")
@pytest.mark.container(ContainerSpec(basic_acl=PRIVATE_ACL_F))
def test_basic_acl_private(
self,
default_wallet: WalletInfo,
other_wallet: WalletInfo,
client_shell: Shell,
container: str,
file_path: str,
cluster: Cluster,
):
"""
Test access to object operations in private container.
"""
with reporter.step("Put object to container"):
owner_object_oid = put_object_to_random_node(default_wallet, file_path, container, client_shell, cluster)
with reporter.step("Check no one except owner has access to operations with container"):
assert_no_access_to_container(other_wallet, container, owner_object_oid, file_path, client_shell, cluster)
with reporter.step("Check owner has full access to private container"):
assert_full_access_to_container(default_wallet, container, owner_object_oid, file_path, self.shell, cluster)
@allure.title("Read operations in readonly container available to others (obj_size={object_size})")
@pytest.mark.container(ContainerSpec(basic_acl=READONLY_ACL_F))
def test_basic_acl_readonly(
self,
default_wallet: WalletInfo,
other_wallet: WalletInfo,
client_shell: Shell,
container: str,
file_path: str,
cluster: Cluster,
):
"""
Test access to object operations in readonly container.
"""
with reporter.step("Put object to container"):
object_oid = put_object_to_random_node(default_wallet, file_path, container, client_shell, cluster)
with reporter.step("Check others has read-only access to operations with container"):
assert_read_only_container(other_wallet, container, object_oid, file_path, client_shell, cluster)
with reporter.step("Check owner has full access to public container"):
assert_full_access_to_container(default_wallet, container, object_oid, file_path, client_shell, cluster)

View file

@@ -2,6 +2,7 @@ import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.steps.node_management import drop_object
from frostfs_testlib.storage.dataclasses import ape
@@ -11,13 +12,14 @@ from frostfs_testlib.utils import wallet_utils
from frostfs_testlib.utils.failover_utils import wait_object_replication
from frostfs_testlib.utils.file_utils import TestFile
from ....helpers.container_access import (
from pytest_tests.helpers.container_access import (
ALL_OBJECT_OPERATIONS,
assert_access_to_container,
assert_full_access_to_container,
assert_no_access_to_container,
)
from ....helpers.container_request import APE_EVERYONE_ALLOW_ALL, OWNER_ALLOW_ALL, ContainerRequest
from ....helpers.container_spec import ContainerSpec
@pytest.fixture
@@ -112,11 +114,7 @@ class TestApeContainer(ClusterTestBase):
assert_full_access_to_container(other_wallet_2, container, objects.pop(), file_path, self.shell, self.cluster)
@allure.title("Replication works with APE deny rules on OWNER and OTHERS (obj_size={object_size})")
@pytest.mark.parametrize(
"container_request",
[ContainerRequest(f"REP %NODE_COUNT% IN X CBF 1 SELECT %NODE_COUNT% FROM * AS X", APE_EVERYONE_ALLOW_ALL, "custom")],
indirect=True,
)
@pytest.mark.container(ContainerSpec(f"REP %NODE_COUNT% IN X CBF 1 SELECT %NODE_COUNT% FROM * AS X", PUBLIC_ACL))
def test_replication_works_with_deny_rules(
self,
default_wallet: WalletInfo,
@@ -152,7 +150,6 @@ class TestApeContainer(ClusterTestBase):
wait_object_replication(container, oid, len(self.cluster.storage_nodes), self.shell, self.cluster.storage_nodes)
@allure.title("Deny operations via APE by role (role=ir, obj_size={object_size})")
@pytest.mark.parametrize("container_request", [OWNER_ALLOW_ALL], indirect=True)
def test_deny_operations_via_ape_by_role_ir(
self, frostfs_cli: FrostfsCli, ir_wallet: WalletInfo, container: str, objects: list[str], rpc_endpoint: str, file_path: TestFile
):
@@ -160,7 +157,7 @@ class TestApeContainer(ClusterTestBase):
ape.ObjectOperations.PUT: False,
ape.ObjectOperations.GET: True,
ape.ObjectOperations.HEAD: True,
ape.ObjectOperations.GET_RANGE: True,
ape.ObjectOperations.GET_RANGE: False,
ape.ObjectOperations.GET_RANGE_HASH: True,
ape.ObjectOperations.SEARCH: True,
ape.ObjectOperations.DELETE: False,
@@ -189,7 +186,6 @@ class TestApeContainer(ClusterTestBase):
assert_access_to_container(default_ir_access, ir_wallet, container, objects[0], file_path, self.shell, self.cluster)
@allure.title("Deny operations via APE by role (role=container, obj_size={object_size})")
@pytest.mark.parametrize("container_request", [OWNER_ALLOW_ALL], indirect=True)
def test_deny_operations_via_ape_by_role_container(
self,
frostfs_cli: FrostfsCli,
@@ -203,10 +199,10 @@ class TestApeContainer(ClusterTestBase):
ape.ObjectOperations.PUT: True,
ape.ObjectOperations.GET: True,
ape.ObjectOperations.HEAD: True,
ape.ObjectOperations.GET_RANGE: True,
ape.ObjectOperations.GET_RANGE: False,
ape.ObjectOperations.GET_RANGE_HASH: True,
ape.ObjectOperations.SEARCH: True,
ape.ObjectOperations.DELETE: True,
ape.ObjectOperations.DELETE: False,
}
with reporter.step("Assert CONTAINER wallet access in default state"):
@@ -221,7 +217,7 @@ class TestApeContainer(ClusterTestBase):
self.wait_for_blocks()
with reporter.step("Assert CONTAINER wallet ignores APE rule"):
assert_access_to_container(access_matrix, container_node_wallet, container, objects[1], file_path, self.shell, self.cluster)
assert_access_to_container(access_matrix, container_node_wallet, container, objects[0], file_path, self.shell, self.cluster)
with reporter.step("Remove APE rule"):
frostfs_cli.ape_manager.remove(rpc_endpoint, rule.chain_id, target_name=container, target_type="container")
@@ -230,4 +226,4 @@ class TestApeContainer(ClusterTestBase):
self.wait_for_blocks()
with reporter.step("Assert CONTAINER wallet access after rule was removed"):
assert_access_to_container(access_matrix, container_node_wallet, container, objects[2], file_path, self.shell, self.cluster)
assert_access_to_container(access_matrix, container_node_wallet, container, objects[0], file_path, self.shell, self.cluster)

View file

@@ -10,16 +10,17 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import TestFile
from ....helpers.bearer_token import create_bearer_token
from ....helpers.container_access import (
from pytest_tests.helpers.bearer_token import create_bearer_token
from pytest_tests.helpers.container_access import (
ALL_OBJECT_OPERATIONS,
FULL_ACCESS,
assert_access_to_container,
assert_full_access_to_container,
assert_no_access_to_container,
)
from ....helpers.container_request import OWNER_ALLOW_ALL
from ....helpers.object_access import OBJECT_ACCESS_DENIED
from pytest_tests.helpers.object_access import OBJECT_ACCESS_DENIED
from ....helpers.container_spec import ContainerSpec
@pytest.mark.nightly
@@ -241,7 +242,8 @@ class TestApeFilters(ClusterTestBase):
@allure.title("Operations with allow APE rule with resource filters (match_type={match_type}, obj_size={object_size})")
@pytest.mark.parametrize("match_type", [ape.MatchType.EQUAL, ape.MatchType.NOT_EQUAL])
@pytest.mark.parametrize("object_size, container_request", [("simple", OWNER_ALLOW_ALL)], indirect=True)
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
@pytest.mark.container(ContainerSpec(basic_acl="0", allow_owner_via_ape=True))
def test_ape_allow_filters_object(
self,
frostfs_cli: FrostfsCli,
@@ -341,7 +343,7 @@ class TestApeFilters(ClusterTestBase):
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, bearer, attributes=allow_attribute)
@allure.title("PUT and GET object using bearer with objectID in filter (obj_size={object_size}, match_type=NOT_EQUAL)")
@pytest.mark.parametrize("container_request", [OWNER_ALLOW_ALL], indirect=True)
@pytest.mark.container(ContainerSpec(basic_acl="0", allow_owner_via_ape=True))
def test_ape_filter_object_id_not_equals(
self,
frostfs_cli: FrostfsCli,
@@ -369,7 +371,7 @@ class TestApeFilters(ClusterTestBase):
get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster, bearer)
@allure.title("PUT and GET object using bearer with objectID in filter (obj_size={object_size}, match_type=EQUAL)")
@pytest.mark.parametrize("container_request", [OWNER_ALLOW_ALL], indirect=True)
@pytest.mark.container(ContainerSpec(basic_acl="0", allow_owner_via_ape=True))
def test_ape_filter_object_id_equals(
self,
frostfs_cli: FrostfsCli,

View file

@@ -7,8 +7,8 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import TestFile
from ....helpers.bearer_token import create_bearer_token
from ....helpers.container_access import (
from pytest_tests.helpers.bearer_token import create_bearer_token
from pytest_tests.helpers.container_access import (
ALL_OBJECT_OPERATIONS,
assert_access_to_container,
assert_full_access_to_container,
@@ -75,7 +75,7 @@ class TestApeBearer(ClusterTestBase):
temp_directory: str,
default_wallet: WalletInfo,
other_wallet: WalletInfo,
container: str,
container: tuple[str, list[str], str],
objects: list[str],
rpc_endpoint: str,
file_path: TestFile,
@@ -114,23 +114,24 @@ class TestApeBearer(ClusterTestBase):
bt_access_map = {
ape.Role.OWNER: {
ape.ObjectOperations.PUT: True,
ape.ObjectOperations.GET: False,
ape.ObjectOperations.GET: True,
ape.ObjectOperations.HEAD: True,
ape.ObjectOperations.GET_RANGE: True,
ape.ObjectOperations.GET_RANGE_HASH: False,
ape.ObjectOperations.SEARCH: False,
ape.ObjectOperations.GET_RANGE_HASH: True,
ape.ObjectOperations.SEARCH: True,
ape.ObjectOperations.DELETE: True,
},
# Bearer Token COMPLETELY overrides chains set for the specific target.
# Thus, any restrictions or permissions should be explicitly defined in BT.
ape.Role.OTHERS: {
ape.ObjectOperations.PUT: False,
ape.ObjectOperations.PUT: True,
ape.ObjectOperations.GET: False,
ape.ObjectOperations.HEAD: False,
ape.ObjectOperations.HEAD: True,
ape.ObjectOperations.GET_RANGE: False,
ape.ObjectOperations.GET_RANGE_HASH: False,
ape.ObjectOperations.SEARCH: False,
ape.ObjectOperations.DELETE: False,
# Although SEARCH is denied by the APE chain defined in the Policy contract,
# Bearer Token COMPLETELY overrides chains set for the specific target.
# Thus, any restrictions or permissions should be explicitly defined in BT.
ape.ObjectOperations.SEARCH: True,
ape.ObjectOperations.DELETE: True,
},
}
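
An access map of this shape is typically verified role by role with the assert_access_to_container helper imported at the top of this file; the sketch below is only an illustration — the role-to-wallet mapping is assumed, and the bearer-token plumbing of the actual test is omitted.

# Hypothetical check loop; wallets, container, objects and file_path come from fixtures.
role_to_wallet = {ape.Role.OWNER: default_wallet, ape.Role.OTHERS: other_wallet}
for role, access_matrix in bt_access_map.items():
    assert_access_to_container(access_matrix, role_to_wallet[role], container, objects[0], file_path, self.shell, self.cluster)
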
@@ -147,11 +148,9 @@ class TestApeBearer(ClusterTestBase):
# Operations that we will allow for each role with bearer token
bearer_map = {
ape.Role.OWNER: [
ape.ObjectOperations.PUT,
ape.ObjectOperations.HEAD,
ape.ObjectOperations.GET_RANGE,
# Delete also requires PUT (to create a tombstone) and HEAD (to get the simple object's header)
ape.ObjectOperations.DELETE,
ape.ObjectOperations.PUT,
ape.ObjectOperations.GET_RANGE,
],
ape.Role.OTHERS: [
ape.ObjectOperations.GET,

View file

@@ -1,14 +1,22 @@
import json
import time
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import search_nodes_with_container
from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.parallel import parallel
from frostfs_testlib.utils import datetime_utils
from ...helpers.container_spec import ContainerSpec
OBJECT_COUNT = 5
@@ -40,6 +48,81 @@ def test_wallet(default_wallet: WalletInfo, other_wallet: WalletInfo, role: ape.
return role_to_wallet_map[role]
@pytest.fixture
def container(
default_wallet: WalletInfo,
frostfs_cli: FrostfsCli,
client_shell: Shell,
cluster: Cluster,
request: pytest.FixtureRequest,
rpc_endpoint: str,
) -> str:
container_spec = _get_container_spec(request)
cid = _create_container_by_spec(default_wallet, client_shell, cluster, rpc_endpoint, container_spec)
if container_spec.allow_owner_via_ape:
_allow_owner_via_ape(frostfs_cli, cluster, cid)
return cid
def _create_container_by_spec(
default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, rpc_endpoint: str, container_spec: ContainerSpec
) -> str:
# TODO: add container spec to step message
with reporter.step("Create container"):
cid = create_container(
default_wallet, client_shell, rpc_endpoint, basic_acl=container_spec.basic_acl, rule=container_spec.parsed_rule(cluster)
)
with reporter.step("Search nodes holding the container"):
container_holder_nodes = search_nodes_with_container(default_wallet, cid, client_shell, cluster.default_rpc_endpoint, cluster)
report_data = {node.id: node.host_ip for node in container_holder_nodes}
reporter.attach(json.dumps(report_data, indent=2), "container_nodes.json")
return cid
def _get_container_spec(request: pytest.FixtureRequest) -> ContainerSpec:
container_marker = request.node.get_closest_marker("container")
# Let the default container be public for now
container_spec = ContainerSpec(basic_acl=PUBLIC_ACL)
if container_marker:
if len(container_marker.args) != 1:
raise RuntimeError(f"Something wrong with container marker: {container_marker}")
container_spec = container_marker.args[0]
if "param" in request.__dict__:
container_spec = request.param
if not container_spec:
raise RuntimeError(
f"""Container specification is empty.
Either add @pytest.mark.container(ContainerSpec(...)) or
@pytest.mark.parametrize(\"container\", [ContainerSpec(...)], indirect=True) decorator"""
)
return container_spec
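
As the error message above states, the spec can be supplied either through the container marker or through indirect parametrization of the container fixture; a minimal sketch of the second form, with an illustrative test name and spec values (pytest and ContainerSpec are already imported in this conftest).

# Hypothetical indirect parametrization: request.param reaches _get_container_spec.
@pytest.mark.parametrize("container", [ContainerSpec(basic_acl="0", allow_owner_via_ape=True)], indirect=True)
def test_with_custom_container(container: str):
    ...
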
def _allow_owner_via_ape(frostfs_cli: FrostfsCli, cluster: Cluster, container: str):
with reporter.step("Create allow APE rule for container owner"):
role_condition = ape.Condition.by_role(ape.Role.OWNER)
deny_rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL, role_condition)
frostfs_cli.ape_manager.add(
cluster.default_rpc_endpoint,
deny_rule.chain_id,
target_name=container,
target_type="container",
rule=deny_rule.as_string(),
)
with reporter.step("Wait for one block"):
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
@pytest.fixture
def objects(container: str, default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, file_path: str):
with reporter.step("Add test objects to container"):

File diff suppressed because it is too large.

View file

@@ -1,5 +1,7 @@
import logging
import os
import random
import shutil
from datetime import datetime, timedelta, timezone
from typing import Optional
@@ -18,7 +20,6 @@ from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.shell import LocalShell, Shell
from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE, FROSTFS_CLI_EXEC
from frostfs_testlib.steps.cli.object import get_netmap_netinfo
from frostfs_testlib.steps.epoch import ensure_fresh_epoch
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
@@ -30,13 +31,11 @@ from frostfs_testlib.storage.grpc_operations.client_wrappers import CliClientWra
from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.parallel import parallel
from frostfs_testlib.testing.test_control import cached_fixture, run_optionally, wait_for_success
from frostfs_testlib.testing.test_control import run_optionally, wait_for_success
from frostfs_testlib.utils import env_utils, string_utils, version_utils
from frostfs_testlib.utils.file_utils import TestFile, generate_file
from ..helpers.container_creation import create_container_with_ape
from ..helpers.container_request import EVERYONE_ALLOW_ALL, ContainerRequest
from ..resources.common import TEST_CYCLES_COUNT
from pytest_tests.resources.common import TEST_CYCLES_COUNT
logger = logging.getLogger("NeoLogger")
@@ -142,16 +141,15 @@ def require_multiple_interfaces(cluster: Cluster):
interfaces = cluster.cluster_nodes[0].host.config.interfaces
if "internal1" not in interfaces or "data1" not in interfaces:
pytest.skip("This test requires multiple internal and data interfaces")
return
yield
@pytest.fixture(scope="session")
@cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
def max_object_size(cluster: Cluster, client_shell: Shell) -> int:
storage_node = cluster.storage_nodes[0]
wallet = WalletInfo.from_node(storage_node)
net_info = get_netmap_netinfo(wallet=wallet, endpoint=storage_node.get_rpc_endpoint(), shell=client_shell)
return net_info["maximum_object_size"]
yield net_info["maximum_object_size"]
@pytest.fixture(scope="session")
@@ -160,6 +158,11 @@ def simple_object_size(max_object_size: int) -> ObjectSize:
return ObjectSize("simple", size)
@pytest.fixture()
def file_path(object_size: ObjectSize) -> TestFile:
return generate_file(object_size.value)
@pytest.fixture(scope="session")
def complex_object_size(max_object_size: int) -> ObjectSize:
size = max_object_size * int(COMPLEX_OBJECT_CHUNKS_COUNT) + int(COMPLEX_OBJECT_TAIL_SIZE)
@@ -178,22 +181,6 @@ def object_size(simple_object_size: ObjectSize, complex_object_size: ObjectSize,
return complex_object_size
@pytest.fixture()
def test_file(object_size: ObjectSize) -> TestFile:
return generate_file(object_size.value)
@pytest.fixture(scope="module")
def test_file_module(object_size: ObjectSize) -> TestFile:
return generate_file(object_size.value)
# Deprecated. Please migrate all to test_file
@pytest.fixture()
def file_path(test_file: TestFile) -> TestFile:
return test_file
@pytest.fixture(scope="session")
def rep_placement_policy() -> PlacementPolicy:
return PlacementPolicy("rep", DEFAULT_PLACEMENT_RULE)
@@ -224,10 +211,8 @@ def placement_policy(
) -> PlacementPolicy:
if request.param == "rep":
return rep_placement_policy
elif request.param == "ec":
return ec_placement_policy
return request.param
return ec_placement_policy
@pytest.fixture(scope="session")
@@ -433,7 +418,6 @@ def readiness_on_node(cluster_node: ClusterNode):
@reporter.step("Prepare default user with wallet")
@pytest.fixture(scope="session")
@cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
def default_user(credentials_provider: CredentialsProvider, cluster: Cluster) -> User:
user = User(string_utils.unique_name("user-"))
node = cluster.cluster_nodes[0]
@@ -450,7 +434,6 @@ def default_wallet(default_user: User) -> WalletInfo:
@pytest.fixture(scope="session")
@cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
def wallets_pool(credentials_provider: CredentialsProvider, cluster: Cluster) -> list[WalletInfo]:
users = [User(string_utils.unique_name("user-")) for _ in range(WALLTETS_IN_POOL)]
parallel(credentials_provider.GRPC.provide, users, cluster_node=cluster.cluster_nodes[0])
@@ -486,48 +469,3 @@ def bucket_container_resolver(node_under_test: ClusterNode) -> BucketContainerRe
resolver_cls = plugins.load_plugin("frostfs.testlib.bucket_cid_resolver", node_under_test.host.config.product)
resolver: BucketContainerResolver = resolver_cls()
return resolver
@pytest.fixture(scope="session", params=[pytest.param(EVERYONE_ALLOW_ALL)])
def container_request(request: pytest.FixtureRequest) -> ContainerRequest:
if "param" in request.__dict__:
return request.param
container_marker = request.node.get_closest_marker("container")
# Let the default container be public for now
container_request = EVERYONE_ALLOW_ALL
if container_marker:
if len(container_marker.args) != 1:
raise RuntimeError(f"Something wrong with container marker: {container_marker}")
container_request = container_marker.args[0]
if not container_request:
raise RuntimeError(
f"""Container specification is empty.
Add @pytest.mark.parametrize("container_request", [ContainerRequest(...)], indirect=True) decorator."""
)
return container_request
@pytest.fixture
def container(
default_wallet: WalletInfo,
frostfs_cli: FrostfsCli,
client_shell: Shell,
cluster: Cluster,
rpc_endpoint: str,
container_request: ContainerRequest,
) -> str:
return create_container_with_ape(frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint, container_request)
@pytest.fixture()
def new_epoch(client_shell: Shell, cluster: Cluster) -> int:
return ensure_fresh_epoch(client_shell, cluster)
@pytest.fixture(scope="module")
def new_epoch_module_scope(client_shell: Shell, cluster: Cluster) -> int:
return ensure_fresh_epoch(client_shell, cluster)

View file

@@ -13,27 +13,38 @@ from frostfs_testlib.steps.cli.container import (
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from ...helpers.utility import placement_policy_from_container
from pytest_tests.helpers.utility import placement_policy_from_container
@pytest.mark.nightly
@pytest.mark.sanity
@pytest.mark.container
class TestContainer(ClusterTestBase):
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
@allure.title("Create container (name={name})")
@pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"])
@pytest.mark.smoke
def test_container_creation(self, default_wallet: WalletInfo, name: str, rpc_endpoint: str):
def test_container_creation(self, default_wallet: WalletInfo, name: str):
wallet = default_wallet
cid = create_container(wallet, self.shell, rpc_endpoint, self.PLACEMENT_RULE, name=name)
placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
cid = create_container(
wallet,
rule=placement_rule,
name=name,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
containers = list_containers(wallet, self.shell, rpc_endpoint)
containers = list_containers(wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
assert cid in containers, f"Expected container {cid} in containers: {containers}"
container_info: str = get_container(wallet, cid, self.shell, rpc_endpoint, False)
container_info: str = get_container(
wallet,
cid,
json_mode=False,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
container_info = container_info.casefold() # To ignore case when comparing with expected values
info_to_check = {
@@ -45,7 +56,7 @@ class TestContainer(ClusterTestBase):
info_to_check.add(f"Name={name}")
with reporter.step("Check container has correct information"):
expected_policy = self.PLACEMENT_RULE.casefold()
expected_policy = placement_rule.casefold()
actual_policy = placement_policy_from_container(container_info)
assert actual_policy == expected_policy, f"Expected policy\n{expected_policy} but got policy\n{actual_policy}"
@@ -54,42 +65,50 @@ class TestContainer(ClusterTestBase):
assert expected_info in container_info, f"Expected {expected_info} in container info:\n{container_info}"
with reporter.step("Delete container and check it was deleted"):
# Use force to skip frostfs-cli verifications before delete.
# Because no APE rules are assigned to the container, those verifications would fail due to APE request denial.
delete_container(wallet, cid, self.shell, rpc_endpoint, force=True, await_mode=True)
delete_container(
wallet,
cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
await_mode=True,
)
self.tick_epoch()
wait_for_container_deletion(wallet, cid, self.shell, rpc_endpoint)
@allure.title("Delete container without force (name={name})")
@pytest.mark.smoke
def test_container_deletion_no_force(self, container: str, default_wallet: WalletInfo, rpc_endpoint: str):
with reporter.step("Delete container and check it was deleted"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=True)
self.tick_epoch()
wait_for_container_deletion(default_wallet, container, self.shell, rpc_endpoint)
wait_for_container_deletion(wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
@allure.title("Parallel container creation and deletion")
def test_container_creation_deletion_parallel(self, default_wallet: WalletInfo, rpc_endpoint: str):
def test_container_creation_deletion_parallel(self, default_wallet: WalletInfo):
containers_count = 3
wallet = default_wallet
placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
iteration_count = 10
for _ in range(iteration_count):
for iteration in range(iteration_count):
cids: list[str] = []
with reporter.step(f"Create {containers_count} containers"):
for _ in range(containers_count):
cids.append(
create_container(wallet, self.shell, rpc_endpoint, self.PLACEMENT_RULE, await_mode=False, wait_for_creation=False)
create_container(
wallet,
rule=placement_rule,
await_mode=False,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wait_for_creation=False,
)
)
with reporter.step("Wait for containers occur in container list"):
for cid in cids:
wait_for_container_creation(wallet, cid, self.shell, rpc_endpoint, sleep_interval=containers_count)
wait_for_container_creation(
wallet,
cid,
sleep_interval=containers_count,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
with reporter.step("Delete containers and check they were deleted"):
for cid in cids:
# Use force to skip frostfs-cli verifications before delete.
# Because no APE rules are assigned to the container, those verifications would fail due to APE request denial.
delete_container(wallet, cid, self.shell, rpc_endpoint, force=True, await_mode=True)
containers_list = list_containers(wallet, self.shell, rpc_endpoint)
delete_container(wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, await_mode=True)
containers_list = list_containers(wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
assert cid not in containers_list, "Container not deleted"

File diff suppressed because it is too large.

View file

@@ -1,640 +0,0 @@
import itertools
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.shell.interfaces import Shell
from frostfs_testlib.steps.cli.container import delete_container
from frostfs_testlib.steps.cli.object import delete_object, put_object_to_random_node
from frostfs_testlib.steps.node_management import get_netmap_snapshot
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing import parallel
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.cli_utils import parse_netmap_output
from frostfs_testlib.utils.file_utils import generate_file
from ...helpers.container_creation import create_container_with_ape
from ...helpers.container_request import PUBLIC_WITH_POLICY, ContainerRequest
from ...helpers.policy_validation import get_netmap_param, validate_object_policy
@pytest.mark.weekly
@pytest.mark.policy
@pytest.mark.policy_price
class TestPolicyWithPrice(ClusterTestBase):
@wait_for_success(1200, 60, title="Wait for the price field to be filled on nodes", expected_result=True)
def await_for_price_attribute_on_nodes(self):
netmap = parse_netmap_output(get_netmap_snapshot(node=self.cluster.storage_nodes[0], shell=self.shell))
netmap = get_netmap_param(netmap)
for node in self.cluster.storage_nodes:
node_address = node.get_rpc_endpoint().split(":")[0]
if netmap[node_address]["Price"] is None:
return False
return True
@pytest.fixture(scope="module")
def fill_field_price(self, cluster: Cluster, cluster_state_controller_session: ClusterStateController):
prices = ["15", "10", "65", "55"]
config_manager = cluster_state_controller_session.manager(ConfigStateManager)
parallel(
config_manager.set_on_node,
cluster.cluster_nodes,
StorageNode,
itertools.cycle([{"node:attribute_5": f"Price:{price}"} for price in prices]),
)
cluster_state_controller_session.wait_after_storage_startup()
self.tick_epoch()
self.await_for_price_attribute_on_nodes()
yield
cluster_state_controller_session.manager(ConfigStateManager).revert_all()
@pytest.fixture
def container(
self,
default_wallet: WalletInfo,
frostfs_cli: FrostfsCli,
client_shell: Shell,
cluster: Cluster,
rpc_endpoint: str,
container_request: ContainerRequest,
# In this set of tests, containers should be created after the fill_field_price fixture
fill_field_price,
) -> str:
return create_container_with_ape(frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint, container_request)
@allure.title("Policy with SELECT and FILTER results with 25% of available nodes")
@pytest.mark.parametrize(
"container_request",
[PUBLIC_WITH_POLICY("REP 1 IN Nodes25 SELECT 1 FROM LE10 AS Nodes25 FILTER Price LE 10 AS LE10")],
indirect=True,
)
def test_policy_with_select_and_filter_results_with_25_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and FILTER results with 25% of available nodes.
"""
placement_params = {"Price": 10}
file_path = generate_file(simple_object_size.value)
expected_copies = 1
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
node_address = resulting_copies[0].get_rpc_endpoint().split(":")[0]
with reporter.step(f"Check the node is selected with price <= {placement_params['Price']}"):
assert (
int(netmap[node_address]["Price"]) <= placement_params["Price"]
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with select and complex filter results with 25% of available nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"REP 1 IN Nodes25 SELECT 1 FROM BET0AND10 AS Nodes25 FILTER Price LE 10 AS LE10 FILTER Price GT 0 AS GT0 FILTER @LE10 AND @GT0 AS BET0AND10"
)
],
indirect=True,
)
def test_policy_with_select_and_complex_filter_results_with_25_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and Complex FILTER results with 25% of available nodes.
"""
placement_params = {"Price": [10, 0]}
file_path = generate_file(simple_object_size.value)
expected_copies = 1
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check the node is selected with price between 1 and 10"):
for node in resulting_copies:
node_address = node.get_rpc_endpoint().split(":")[0]
assert (
int(netmap[node_address]["Price"]) > placement_params["Price"][1]
and int(netmap[node_address]["Price"]) <= placement_params["Price"][0]
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with Multi SELECTs and FILTERs results with 25% of available nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"UNIQUE REP 1 IN One REP 1 IN One CBF 1 SELECT 1 FROM MINMAX AS One FILTER Price LT 15 AS LT15 FILTER Price GT 55 AS GT55 FILTER @LT15 OR @GT55 AS MINMAX"
)
],
indirect=True,
)
def test_policy_with_multi_selects_and_filters_results_with_25_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with Multi SELECTs and FILTERs results with 25% of available nodes.
"""
placement_params = {"Price": [15, 55]}
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected with max and min prices"):
for node in resulting_copies:
node_address = node.get_rpc_endpoint().split(":")[0]
assert (
int(netmap[node_address]["Price"]) > placement_params["Price"][1]
or int(netmap[node_address]["Price"]) < placement_params["Price"][0]
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT and FILTER results with 50% of available nodes")
@pytest.mark.parametrize(
"container_request",
[PUBLIC_WITH_POLICY("REP 2 IN HALF CBF 1 SELECT 2 FROM GT15 AS HALF FILTER Price GT 15 AS GT15")],
indirect=True,
)
def test_policy_with_select_and_filter_results_with_50_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and FILTER results with 50% of available nodes.
"""
placement_params = {"Price": 15}
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected with price > {placement_params['Price']}"):
for node in resulting_copies:
node_address = node.get_rpc_endpoint().split(":")[0]
assert (
int(netmap[node_address]["Price"]) > placement_params["Price"]
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT and Complex FILTER results with 50% of available nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"REP 2 IN HALF CBF 2 SELECT 2 FROM GE15 AS HALF FILTER CountryCode NE RU AS NOTRU FILTER @NOTRU AND Price GE 15 AS GE15"
)
],
indirect=True,
)
def test_policy_with_select_and_complex_filter_results_with_50_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and Complex FILTER results with 50% of available nodes.
"""
placement_params = {"Price": 15, "country_code": "RU"}
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected not with country code '{placement_params['country_code']}'"):
for node in resulting_copies:
node_address = node.get_rpc_endpoint().split(":")[0]
assert (
not netmap[node_address]["country_code"] == placement_params["country_code"]
or not netmap[node_address]["country_code"] == placement_params["country_code"]
and int(netmap[node_address]["Price"]) >= placement_params["Price"]
), f"The node is selected with the wrong price or country code. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with Multi SELECTs and FILTERs results with 50% of available nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"REP 2 IN FH REP 1 IN SH CBF 2 SELECT 2 FROM LE55 AS FH SELECT 2 FROM GE15 AS SH FILTER 'UN-LOCODE' EQ 'RU LED' OR 'UN-LOCODE' EQ 'RU MOW' AS RU FILTER NOT (@RU) AS NOTRU FILTER @NOTRU AND Price GE 15 AS GE15 FILTER @RU AND Price LE 55 AS LE55"
)
],
indirect=True,
)
def test_policy_with_multi_selects_and_filters_results_with_50_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with Multi SELECTs and FILTERs results with 50% of available nodes.
"""
placement_params = {"un_locode": ["RU LED", "RU MOW"], "Price": [15, 55]}
file_path = generate_file(simple_object_size.value)
expected_copies = 3
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check all nodes are selected"):
for node in resulting_copies:
node_address = node.get_rpc_endpoint().split(":")[0]
assert (
netmap[node_address]["un_locode"] in placement_params["un_locode"]
or not netmap[node_address]["un_locode"] == placement_params["un_locode"][1]
or (
not netmap[node_address]["un_locode"] == placement_params["un_locode"][1]
and int(netmap[node_address]["Price"]) >= placement_params["Price"][0]
)
or (
netmap[node_address]["un_locode"] == placement_params["un_locode"][1]
and int(netmap[node_address]["Price"]) <= placement_params["Price"][1]
)
), f"The node is selected with the wrong price or un_locode. Expected {placement_params} and got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT and FILTER results with 75% of available nodes")
@pytest.mark.parametrize(
"container_request",
[PUBLIC_WITH_POLICY("REP 2 IN NODES75 SELECT 2 FROM LT65 AS NODES75 FILTER Price LT 65 AS LT65")],
indirect=True,
)
def test_policy_with_select_and_filter_results_with_75_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and FILTER results with 75% of available nodes.
"""
placement_params = {"Price": 65}
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected with price < {placement_params['Price']}"):
for node in resulting_copies:
node_address = node.get_rpc_endpoint().split(":")[0]
assert (
int(netmap[node_address]["Price"]) < placement_params["Price"]
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT and Complex FILTER results with 75% of available nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"REP 2 IN NODES75 SELECT 2 FROM LT65 AS NODES75 FILTER Continent NE America AS NOAM FILTER @NOAM AND Price LT 65 AS LT65"
)
],
indirect=True,
)
def test_policy_with_select_and_complex_filter_results_with_75_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and Complex FILTER results with 75% of available nodes.
"""
placement_params = {"Price": 65, "continent": "America"}
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check three nodes are selected not from {placement_params['continent']}"):
for node in resulting_copies:
node_address = node.get_rpc_endpoint().split(":")[0]
assert (
int(netmap[node_address]["Price"]) < placement_params["Price"]
and not netmap[node_address]["continent"] == placement_params["continent"]
) or (
not netmap[node_address]["continent"] == placement_params["continent"]
), f"The node is selected with the wrong price or continent. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with Multi SELECTs and FILTERs results with 75% of available nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"REP 3 IN EXPNSV REP 3 IN CHEAP SELECT 3 FROM GT10 AS EXPNSV SELECT 3 FROM LT65 AS CHEAP FILTER NOT (Continent EQ America) AS NOAM FILTER @NOAM AND Price LT 65 AS LT65 FILTER @NOAM AND Price GT 10 AS GT10"
)
],
indirect=True,
)
def test_policy_with_multi_selects_and_filters_results_with_75_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with Multi SELECTs and FILTERs results with 75% of available nodes.
"""
placement_params = {"Price": [65, 10], "continent": "America"}
file_path = generate_file(simple_object_size.value)
expected_copies = 4
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check all nodes are selected"):
for node in resulting_copies:
node_address = node.get_rpc_endpoint().split(":")[0]
assert (
(
int(netmap[node_address]["Price"]) > placement_params["Price"][1]
and not netmap[node_address]["continent"] == placement_params["continent"]
)
or (
int(netmap[node_address]["Price"]) < placement_params["Price"][0]
and not netmap[node_address]["continent"] == placement_params["continent"]
)
or not (netmap[node_address]["continent"] == placement_params["continent"])
), f"The node is selected with the wrong price or continent. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT and FILTER results with 100% of available nodes")
@pytest.mark.parametrize(
"container_request", [PUBLIC_WITH_POLICY("REP 1 IN All SELECT 4 FROM AllNodes AS All FILTER Price GE 0 AS AllNodes")], indirect=True
)
def test_policy_with_select_and_filter_results_with_100_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and FILTER results with 100% of available nodes.
"""
placement_params = {"Price": 0}
file_path = generate_file(simple_object_size.value)
expected_copies = 1
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
node_address = resulting_copies[0].get_rpc_endpoint().split(":")[0]
with reporter.step(f"Check the node is selected with price >= {placement_params['Price']}"):
assert (
int(netmap[node_address]["Price"]) >= placement_params["Price"]
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with Multi SELECTs and FILTERs results with 100% of available nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"REP 4 IN AllOne REP 4 IN AllTwo CBF 4 SELECT 2 FROM GEZero AS AllOne SELECT 2 FROM AllCountries AS AllTwo FILTER Country EQ Russia OR Country EQ Sweden OR Country EQ Finland AS AllCountries FILTER Price GE 0 AS GEZero"
)
],
indirect=True,
)
def test_policy_with_multi_selects_and_filters_results_with_100_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with Multi SELECTs and FILTERs results with 100% of available nodes.
"""
placement_params = {"country": ["Russia", "Sweden", "Finland"], "Price": 0}
file_path = generate_file(simple_object_size.value)
expected_copies = 4
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check all node are selected"):
for node in resulting_copies:
node_address = node.get_rpc_endpoint().split(":")[0]
assert (netmap[node_address]["country"] in placement_params["country"]) or (
int(netmap[node_address]["Price"]) >= placement_params["Price"]
), f"The node is selected from the wrong country or with wrong price. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
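The netmap checks in the tests above all follow the same pattern: resolve each copy's address from its RPC endpoint, look the address up in the parsed netmap, and compare the Price/continent/country attributes against the policy filters. Below is a minimal standalone sketch of that pattern; the helper name and the sample data are hypothetical, and it assumes only the plain dict shape produced by get_netmap_param().

def assert_nodes_match_filters(
    netmap_params: dict[str, dict[str, str]],
    node_addresses: list[str],
    max_price: int | None = None,
    excluded_continent: str | None = None,
) -> None:
    # Fail if any selected node violates the price or continent constraint.
    for address in node_addresses:
        attrs = netmap_params[address]
        if max_price is not None:
            assert int(attrs["Price"]) < max_price, f"{address} selected with price {attrs['Price']}"
        if excluded_continent is not None:
            assert attrs["continent"] != excluded_continent, f"{address} selected from {attrs['continent']}"

# Usage with made-up data:
sample_netmap = {"10.0.0.1": {"Price": "15", "continent": "Europe"}}
assert_nodes_match_filters(sample_netmap, ["10.0.0.1"], max_price=65, excluded_continent="America")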

View file

@ -34,8 +34,6 @@ from frostfs_testlib.utils.failover_utils import wait_object_replication
from frostfs_testlib.utils.file_keeper import FileKeeper
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from ...resources.common import S3_POLICY_FILE_LOCATION
logger = logging.getLogger("NeoLogger")
stopped_nodes: list[StorageNode] = []
@ -97,7 +95,9 @@ class TestFailoverStorage(ClusterTestBase):
)
with reporter.step("Check object data is not corrupted"):
got_file_path = get_object(wallet, cid, oid, endpoint=replicated_nodes[0].get_rpc_endpoint(), shell=self.shell)
got_file_path = get_object(
wallet, cid, oid, endpoint=replicated_nodes[0].get_rpc_endpoint(), shell=self.shell
)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
with reporter.step("Return all hosts"):
@ -105,10 +105,12 @@ class TestFailoverStorage(ClusterTestBase):
with reporter.step("Check object data is not corrupted"):
replicated_nodes = wait_object_replication(cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes)
got_file_path = get_object(wallet, cid, oid, shell=self.shell, endpoint=replicated_nodes[0].get_rpc_endpoint())
got_file_path = get_object(
wallet, cid, oid, shell=self.shell, endpoint=replicated_nodes[0].get_rpc_endpoint()
)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
@pytest.mark.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], indirect=True)
@pytest.mark.parametrize("s3_policy", ["pytest_tests/resources/files/policy.json"], indirect=True)
@allure.title("Do not ignore unhealthy tree endpoints (s3_client={s3_client})")
def test_unhealthy_tree(
self,
@ -149,7 +151,7 @@ class TestFailoverStorage(ClusterTestBase):
wallet=default_wallet,
shell=self.shell,
endpoint=self.cluster.storage_nodes[0].get_rpc_endpoint(),
bucket_container_resolver=bucket_container_resolver,
bucket_container_resolver=bucket_container_resolver
)[0]
with reporter.step("Turn off all storage nodes except bucket node"):
@ -280,7 +282,9 @@ class TestEmptyMap(ClusterTestBase):
cluster_state_controller.stop_services_of_type(StorageNode)
with reporter.step("Remove all nodes from network map"):
remove_nodes_from_map_morph(shell=self.shell, cluster=self.cluster, remove_nodes=self.cluster.services(StorageNode))
remove_nodes_from_map_morph(
shell=self.shell, cluster=self.cluster, remove_nodes=self.cluster.services(StorageNode)
)
with reporter.step("Return all storage nodes to network map"):
self.return_nodes_after_stop_with_check_empty_map(cluster_state_controller)
@ -461,7 +465,9 @@ class TestStorageDataLoss(ClusterTestBase):
s3_client.put_object(bucket, complex_object_path)
with reporter.step("Check objects are in bucket"):
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[simple_object_key, complex_object_key])
s3_helper.check_objects_in_bucket(
s3_client, bucket, expected_objects=[simple_object_key, complex_object_key]
)
with reporter.step("Stop storage services on all nodes"):
cluster_state_controller.stop_services_of_type(StorageNode)
@ -575,13 +581,17 @@ class TestStorageDataLoss(ClusterTestBase):
exception_messages.append(f"Shard {shard} changed status to {status}")
with reporter.step("No related errors should be in log"):
if node_under_test.host.is_message_in_logs(message_regex=r"\Wno such file or directory\W", since=test_start_time):
if node_under_test.host.is_message_in_logs(
message_regex=r"\Wno such file or directory\W", since=test_start_time
):
exception_messages.append(f"Node {node_under_test} have shard errors in logs")
with reporter.step("Pass test if no errors found"):
assert not exception_messages, "\n".join(exception_messages)
@allure.title("Loss of one node should trigger use of tree and storage service in another node (s3_client={s3_client})")
@allure.title(
"Loss of one node should trigger use of tree and storage service in another node (s3_client={s3_client})"
)
def test_s3_one_endpoint_loss(
self,
bucket,
@ -603,7 +613,7 @@ class TestStorageDataLoss(ClusterTestBase):
put_object = s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
@pytest.mark.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], indirect=True)
@pytest.mark.parametrize("s3_policy", ["pytest_tests/resources/files/policy.json"], indirect=True)
@allure.title("After Pilorama.db loss on one node object is retrievable (s3_client={s3_client})")
def test_s3_one_pilorama_loss(
self,

View file

@ -44,7 +44,7 @@ from frostfs_testlib.utils import string_utils
from frostfs_testlib.utils.failover_utils import wait_object_replication
from frostfs_testlib.utils.file_utils import generate_file
from ...helpers.utility import wait_for_gc_pass_on_storage_nodes
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
logger = logging.getLogger("NeoLogger")
check_nodes: list[StorageNode] = []
@ -364,7 +364,7 @@ class TestMaintenanceMode(ClusterTestBase):
cluster_state_controller: ClusterStateController,
restore_node_status: list[ClusterNode],
):
with reporter.step("Create container and put object"):
with reporter.step("Create container and create\put object"):
cid = create_container(
wallet=default_wallet,
shell=self.shell,

View file

@ -1,6 +1,7 @@
import math
import allure
from frostfs_testlib.testing.parallel import parallel
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.cli.container import create_container, delete_container, search_nodes_with_container, wait_for_container_deletion
@ -11,20 +12,19 @@ from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.parallel import parallel
from frostfs_testlib.utils.file_utils import generate_file
from ...helpers.utility import are_numbers_similar
@pytest.mark.nightly
@pytest.mark.metrics
@pytest.mark.container
class TestContainerMetrics(ClusterTestBase):
@reporter.step("Put object to container: {cid}")
def put_object_parallel(self, file_path: str, wallet: WalletInfo, cid: str):
oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
return oid
@reporter.step("Get metrics value from node")
def get_metrics_search_by_greps_parallel(self, node: ClusterNode, **greps):
try:
@ -139,7 +139,12 @@ class TestContainerMetrics(ClusterTestBase):
@allure.title("Container size metrics put {objects_count} objects (obj_size={object_size})")
@pytest.mark.parametrize("objects_count", [5, 10, 20])
def test_container_size_metrics_more_objects(self, object_size: ObjectSize, default_wallet: WalletInfo, objects_count: int):
def test_container_size_metrics_more_objects(
self,
object_size: ObjectSize,
default_wallet: WalletInfo,
objects_count: int
):
with reporter.step(f"Create container"):
cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint)
@ -149,14 +154,10 @@ class TestContainerMetrics(ClusterTestBase):
oids = [future.result() for future in futures]
with reporter.step("Check metric appears in all nodes"):
metric_values = [
get_metrics_value(node, command="frostfs_node_engine_container_size_bytes", cid=cid) for node in self.cluster.cluster_nodes
]
actual_value = sum(metric_values) // 2 # for policy REP 2, value divide by 2
metric_values = [get_metrics_value(node, command="frostfs_node_engine_container_size_bytes", cid=cid) for node in self.cluster.cluster_nodes]
actual_value = sum(metric_values) // 2 # for policy REP 2, value divide by 2
expected_value = object_size.value * objects_count
assert are_numbers_similar(
actual_value, expected_value, tolerance_percentage=2
), "metric container size bytes value not correct"
assert are_numbers_similar(actual_value, expected_value, tolerance_percentage=2), "metric container size bytes value not correct"
with reporter.step("Delete file, wait until gc remove object"):
tombstones_size = 0
@ -172,10 +173,16 @@ class TestContainerMetrics(ClusterTestBase):
assert act_metric >= 0, "Metrics value is negative"
assert sum(metrics_value_nodes) // len(self.cluster.cluster_nodes) == tombstones_size, "tombstone size of objects not correct"
@allure.title("Container metrics (policy={policy})")
@pytest.mark.parametrize("placement_policy, policy", [("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", "REP"), ("EC 1.1 CBF 1", "EC")])
def test_container_metrics_delete_complex_objects(
self, complex_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, placement_policy: str, policy: str
self,
complex_object_size: ObjectSize,
default_wallet: WalletInfo,
cluster: Cluster,
placement_policy: str,
policy: str
):
copies = 2 if policy == "REP" else 1
objects_count = 2
@ -194,9 +201,9 @@ class TestContainerMetrics(ClusterTestBase):
with reporter.step("Delete objects and container"):
for oid in oids:
delete_object(default_wallet, cid, oid, self.shell, cluster.default_rpc_endpoint)
delete_container(default_wallet, cid, self.shell, cluster.default_rpc_endpoint)
with reporter.step("Tick epoch and check container was deleted"):
self.tick_epoch()
wait_for_container_deletion(default_wallet, cid, shell=self.shell, endpoint=cluster.default_rpc_endpoint)

View file

@ -5,7 +5,6 @@ import sys
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.error_patterns import (
INVALID_LENGTH_SPECIFIER,
INVALID_OFFSET_SPECIFIER,
@ -15,7 +14,7 @@ from frostfs_testlib.resources.error_patterns import (
OUT_OF_RANGE,
)
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE, search_nodes_with_container
from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container
from frostfs_testlib.steps.cli.object import (
get_object_from_random_node,
get_range,
@ -34,16 +33,12 @@ from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import TestFile, get_file_content, get_file_hash
from ...helpers.container_creation import create_container_with_ape
from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest
from frostfs_testlib.utils.file_utils import generate_file, get_file_content, get_file_hash
logger = logging.getLogger("NeoLogger")
CLEANUP_TIMEOUT = 10
COMMON_ATTRIBUTE = {"common_key": "common_value"}
COMMON_CONTAINER_RULE = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
# Will upload object for each attribute set
OBJECT_ATTRIBUTES = [
None,
@ -85,7 +80,7 @@ def generate_ranges(storage_object: StorageObjectInfo, max_object_size: int, she
for offset, length in file_ranges:
range_length = random.randint(RANGE_MIN_LEN, RANGE_MAX_LEN)
range_start = random.randint(offset, offset + length - 1)
range_start = random.randint(offset, offset + length)
file_ranges_to_test.append((range_start, min(range_length, storage_object.size - range_start)))
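The only difference between the two range_start lines above is the inclusive upper bound passed to random.randint: with "offset + length" the start can land one byte past the segment, while "offset + length - 1" keeps it inside [offset, offset + length). A tiny self-contained illustration (the values are made up):

import random

offset, length = 100, 50
range_start = random.randint(offset, offset + length - 1)
# randint is inclusive on both ends, so this always holds:
assert offset <= range_start < offset + length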
@ -95,15 +90,12 @@ def generate_ranges(storage_object: StorageObjectInfo, max_object_size: int, she
@pytest.fixture(scope="module")
def common_container(
frostfs_cli: FrostfsCli,
default_wallet: WalletInfo,
client_shell: Shell,
cluster: Cluster,
rpc_endpoint: str,
) -> str:
container_request = ContainerRequest(COMMON_CONTAINER_RULE, APE_EVERYONE_ALLOW_ALL)
return create_container_with_ape(frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint, container_request)
def common_container(default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster) -> str:
rule = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
with reporter.step(f"Create container with {rule} and put object"):
cid = create_container(default_wallet, client_shell, cluster.default_rpc_endpoint, rule)
return cid
@pytest.fixture(scope="module")
@ -125,35 +117,33 @@ def storage_objects(
client_shell: Shell,
cluster: Cluster,
object_size: ObjectSize,
test_file_module: TestFile,
frostfs_cli: FrostfsCli,
placement_policy: PlacementPolicy,
rpc_endpoint: str,
) -> list[StorageObjectInfo]:
cid = create_container_with_ape(
frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint, ContainerRequest(placement_policy.value, APE_EVERYONE_ALLOW_ALL)
)
wallet = default_wallet
# Separate containers for complex/simple objects to avoid side-effects
cid = create_container(wallet, shell=client_shell, rule=placement_policy.value, endpoint=cluster.default_rpc_endpoint)
file_path = generate_file(object_size.value)
file_hash = get_file_hash(file_path)
with reporter.step("Generate file"):
file_hash = get_file_hash(test_file_module.path)
storage_objects = []
with reporter.step("Put objects"):
# We need to upload objects multiple times with different attributes
for attributes in OBJECT_ATTRIBUTES:
storage_object_id = put_object_to_random_node(
default_wallet,
test_file_module.path,
cid,
client_shell,
cluster,
wallet=wallet,
path=file_path,
cid=cid,
shell=client_shell,
cluster=cluster,
attributes=attributes,
)
storage_object = StorageObjectInfo(cid, storage_object_id)
storage_object.size = object_size.value
storage_object.wallet = default_wallet
storage_object.file_path = test_file_module.path
storage_object.wallet = wallet
storage_object.file_path = file_path
storage_object.file_hash = file_hash
storage_object.attributes = attributes
@ -251,26 +241,21 @@ class TestObjectApi(ClusterTestBase):
)
self.check_header_is_presented(head_info, storage_object_2.attributes)
@allure.title("Head deleted object with --raw arg (obj_size={object_size}, policy={container_request})")
@pytest.mark.parametrize(
"container_request",
[
ContainerRequest(DEFAULT_PLACEMENT_RULE, APE_EVERYONE_ALLOW_ALL, "rep"),
ContainerRequest(DEFAULT_EC_PLACEMENT_RULE, APE_EVERYONE_ALLOW_ALL, "ec"),
],
indirect=True,
ids=["rep", "ec"],
)
def test_object_head_raw(self, default_wallet: str, container: str, test_file: TestFile):
@allure.title("Head deleted object with --raw arg (obj_size={object_size}, policy={placement_policy})")
def test_object_head_raw(self, default_wallet: str, object_size: ObjectSize, placement_policy: PlacementPolicy):
with reporter.step("Create container"):
cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, placement_policy.value)
with reporter.step("Upload object"):
oid = put_object_to_random_node(default_wallet, test_file.path, container, self.shell, self.cluster)
file_path = generate_file(object_size.value)
oid = put_object_to_random_node(default_wallet, file_path, cid, self.shell, self.cluster)
with reporter.step("Delete object"):
delete_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
with reporter.step("Call object head --raw and expect error"):
with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED):
head_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint, is_raw=True)
head_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint, is_raw=True)
@allure.title("Search objects by native API (obj_size={object_size}, policy={placement_policy})")
def test_search_object_api(self, storage_objects: list[StorageObjectInfo]):
@ -314,50 +299,54 @@ class TestObjectApi(ClusterTestBase):
assert sorted(expected_oids) == sorted(result)
@allure.title("Search objects with removed items (obj_size={object_size})")
def test_object_search_should_return_tombstone_items(
self,
default_wallet: WalletInfo,
container: str,
object_size: ObjectSize,
rpc_endpoint: str,
test_file: TestFile,
):
def test_object_search_should_return_tombstone_items(self, default_wallet: WalletInfo, object_size: ObjectSize):
"""
Validate object search with removed items
"""
with reporter.step("Put object to container"):
wallet = default_wallet
cid = create_container(wallet, self.shell, self.cluster.default_rpc_endpoint)
with reporter.step("Upload file"):
file_path = generate_file(object_size.value)
file_hash = get_file_hash(file_path)
storage_object = StorageObjectInfo(
cid=container,
oid=put_object_to_random_node(default_wallet, test_file.path, container, self.shell, self.cluster),
cid=cid,
oid=put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster),
size=object_size.value,
wallet=default_wallet,
file_path=test_file.path,
file_hash=get_file_hash(test_file.path),
wallet=wallet,
file_path=file_path,
file_hash=file_hash,
)
with reporter.step("Search object"):
# Root Search object should return root object oid
result = search_object(default_wallet, container, self.shell, rpc_endpoint, root=True)
objects_before_deletion = len(result)
assert storage_object.oid in result
result = search_object(wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, root=True)
assert result == [storage_object.oid]
with reporter.step("Delete object"):
with reporter.step("Delete file"):
delete_objects([storage_object], self.shell, self.cluster)
with reporter.step("Search deleted object with --root"):
# Root Search object should return nothing
result = search_object(default_wallet, container, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, root=True)
result = search_object(wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, root=True)
assert len(result) == 0
with reporter.step("Search deleted object with --phy should return only tombstones"):
# Physical Search object should return only tombstones
result = search_object(default_wallet, container, self.shell, rpc_endpoint, phy=True)
result = search_object(wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, phy=True)
assert storage_object.tombstone in result, "Search result should contain tombstone of removed object"
assert storage_object.oid not in result, "Search result should not contain ObjectId of removed object"
for tombstone_oid in result:
head_info = head_object(default_wallet, container, tombstone_oid, self.shell, rpc_endpoint)
object_type = head_info["header"]["objectType"]
header = head_object(
wallet,
cid,
tombstone_oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)["header"]
object_type = header["objectType"]
assert object_type == "TOMBSTONE", f"Object wasn't deleted properly. Found object {tombstone_oid} with type {object_type}"
@allure.title("Get range hash by native API (obj_size={object_size}, policy={placement_policy})")

View file

@ -2,12 +2,14 @@ import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import (
REP_2_FOR_3_NODES_PLACEMENT_RULE,
SINGLE_PLACEMENT_RULE,
StorageContainer,
StorageContainerInfo,
create_container,
)
from frostfs_testlib.steps.cli.object import delete_object, get_object
from frostfs_testlib.steps.storage_object import StorageObjectInfo
@ -19,20 +21,15 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from pytest import FixtureRequest
from ...helpers.bearer_token import create_bearer_token
from ...helpers.container_access import assert_full_access_to_container
from ...helpers.container_creation import create_container_with_ape
from ...helpers.container_request import ContainerRequest
from pytest_tests.helpers.bearer_token import create_bearer_token
from pytest_tests.helpers.container_access import assert_full_access_to_container
@pytest.fixture(scope="session")
@allure.title("Create user container for bearer token usage")
def user_container(
frostfs_cli: FrostfsCli, default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, rpc_endpoint: str, request: FixtureRequest
) -> StorageContainer:
policy = request.param if "param" in request.__dict__ else SINGLE_PLACEMENT_RULE
container_request = ContainerRequest(policy)
container_id = create_container_with_ape(frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint, container_request)
def user_container(default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, request: FixtureRequest) -> StorageContainer:
rule = request.param if "param" in request.__dict__ else SINGLE_PLACEMENT_RULE
container_id = create_container(default_wallet, client_shell, cluster.default_rpc_endpoint, rule, PUBLIC_ACL)
# Deliberately using s3gate wallet here to test bearer token
s3_gate_wallet = WalletInfo.from_node(cluster.s3_gates[0])
@ -68,7 +65,6 @@ def storage_objects(
@pytest.mark.nightly
@pytest.mark.bearer
@pytest.mark.ape
@pytest.mark.grpc_api
class TestObjectApiWithBearerToken(ClusterTestBase):
@allure.title("Object can be deleted from any node using s3gate wallet with bearer token (obj_size={object_size})")
@pytest.mark.parametrize(

View file

@ -4,14 +4,15 @@ import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import get_object_from_random_node, head_object, put_object_to_random_node
from frostfs_testlib.steps.epoch import get_epoch
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import TestFile
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from ...helpers.utility import wait_for_gc_pass_on_storage_nodes
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
logger = logging.getLogger("NeoLogger")
@ -21,30 +22,35 @@ logger = logging.getLogger("NeoLogger")
@pytest.mark.grpc_api
class TestObjectApiLifetime(ClusterTestBase):
@allure.title("Object is removed when lifetime expired (obj_size={object_size})")
def test_object_api_lifetime(self, container: str, test_file: TestFile, default_wallet: WalletInfo):
def test_object_api_lifetime(self, default_wallet: WalletInfo, object_size: ObjectSize):
"""
Test object deleted after expiration epoch.
"""
wallet = default_wallet
endpoint = self.cluster.default_rpc_endpoint
cid = create_container(wallet, self.shell, endpoint)
file_path = generate_file(object_size.value)
file_hash = get_file_hash(file_path)
epoch = get_epoch(self.shell, self.cluster)
oid = put_object_to_random_node(wallet, test_file.path, container, self.shell, self.cluster, expire_at=epoch + 1)
with expect_not_raises():
head_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster, expire_at=epoch + 1)
got_file = get_object_from_random_node(wallet, cid, oid, self.shell, self.cluster)
assert get_file_hash(got_file) == file_hash
with reporter.step("Tick two epochs"):
self.tick_epochs(2)
for _ in range(2):
self.tick_epoch()
# Wait for GC, because object with expiration is counted as alive until GC removes it
wait_for_gc_pass_on_storage_nodes()
with reporter.step("Check object deleted because it expires on epoch"):
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
head_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
head_object(wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(wallet, container, oid, self.shell, self.cluster)
get_object_from_random_node(wallet, cid, oid, self.shell, self.cluster)
with reporter.step("Tick additional epoch"):
self.tick_epoch()
@ -53,6 +59,6 @@ class TestObjectApiLifetime(ClusterTestBase):
with reporter.step("Check object deleted because it expires on previous epoch"):
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
head_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
head_object(wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(wallet, container, oid, self.shell, self.cluster)
get_object_from_random_node(wallet, cid, oid, self.shell, self.cluster)

View file

@ -1,23 +1,25 @@
import logging
import re
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.credentials.interfaces import CredentialsProvider, User
from frostfs_testlib.resources.common import STORAGE_GC_TIME
from frostfs_testlib.resources.error_patterns import (
LIFETIME_REQUIRED,
LOCK_NON_REGULAR_OBJECT,
LOCK_OBJECT_EXPIRATION,
LOCK_OBJECT_REMOVAL,
OBJECT_ALREADY_REMOVED,
OBJECT_IS_LOCKED,
OBJECT_NOT_FOUND,
)
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo
from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo, create_container
from frostfs_testlib.steps.cli.object import delete_object, head_object, lock_object
from frostfs_testlib.steps.complex_object_actions import get_link_object, get_storage_object_chunks
from frostfs_testlib.steps.epoch import ensure_fresh_epoch
from frostfs_testlib.steps.epoch import ensure_fresh_epoch, get_epoch, tick_epoch
from frostfs_testlib.steps.node_management import drop_object
from frostfs_testlib.steps.storage_object import delete_objects
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
@ -27,11 +29,9 @@ from frostfs_testlib.storage.dataclasses.storage_object_info import LockObjectIn
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises, wait_for_success
from frostfs_testlib.utils import datetime_utils
from frostfs_testlib.utils import datetime_utils, string_utils
from ...helpers.container_creation import create_container_with_ape
from ...helpers.container_request import EVERYONE_ALLOW_ALL
from ...helpers.utility import wait_for_gc_pass_on_storage_nodes
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
logger = logging.getLogger("NeoLogger")
@ -40,15 +40,25 @@ FIXTURE_OBJECT_LIFETIME = 10
@pytest.fixture(scope="module")
def user_container(
frostfs_cli: FrostfsCli, default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, rpc_endpoint: str
) -> StorageContainer:
cid = create_container_with_ape(frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint, EVERYONE_ALLOW_ALL)
return StorageContainer(StorageContainerInfo(cid, default_wallet), client_shell, cluster)
def user_wallet(credentials_provider: CredentialsProvider, cluster: Cluster) -> WalletInfo:
with reporter.step("Create user wallet with container"):
user = User(string_utils.unique_name("user-"))
return credentials_provider.GRPC.provide(user, cluster.cluster_nodes[0])
@pytest.fixture(scope="module")
def locked_storage_object(user_container: StorageContainer, client_shell: Shell, cluster: Cluster, object_size: ObjectSize):
def user_container(user_wallet: WalletInfo, client_shell: Shell, cluster: Cluster):
container_id = create_container(user_wallet, shell=client_shell, endpoint=cluster.default_rpc_endpoint)
return StorageContainer(StorageContainerInfo(container_id, user_wallet), client_shell, cluster)
@pytest.fixture(scope="module")
def locked_storage_object(
user_container: StorageContainer,
client_shell: Shell,
cluster: Cluster,
object_size: ObjectSize,
):
"""
The intention of this fixture is to provide a storage object which is NOT expected to be deleted during the test act phase
"""
@ -67,22 +77,56 @@ def locked_storage_object(user_container: StorageContainer, client_shell: Shell,
)
storage_object.locks = [LockObjectInfo(storage_object.cid, lock_object_id, FIXTURE_LOCK_LIFETIME, expiration_epoch)]
return storage_object
yield storage_object
with reporter.step("Delete created locked object"):
current_epoch = get_epoch(client_shell, cluster)
epoch_diff = expiration_epoch - current_epoch + 1
if epoch_diff > 0:
with reporter.step(f"Tick {epoch_diff} epochs"):
for _ in range(epoch_diff):
tick_epoch(client_shell, cluster)
try:
delete_object(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
client_shell,
cluster.default_rpc_endpoint,
)
except Exception as ex:
ex_message = str(ex)
# It's okay if object already removed
if not re.search(OBJECT_NOT_FOUND, ex_message) and not re.search(OBJECT_ALREADY_REMOVED, ex_message):
raise ex
logger.debug(ex_message)
@wait_for_success(datetime_utils.parse_time(STORAGE_GC_TIME))
def check_object_not_found(wallet: WalletInfo, cid: str, oid: str, shell: Shell, rpc_endpoint: str):
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
head_object(wallet, cid, oid, shell, rpc_endpoint)
head_object(
wallet,
cid,
oid,
shell,
rpc_endpoint,
)
def verify_object_available(wallet: WalletInfo, cid: str, oid: str, shell: Shell, rpc_endpoint: str):
with expect_not_raises():
head_object(wallet, cid, oid, shell, rpc_endpoint)
head_object(
wallet,
cid,
oid,
shell,
rpc_endpoint,
)
@pytest.mark.nightly
@pytest.mark.grpc_api
@pytest.mark.grpc_object_lock
class TestObjectLockWithGrpc(ClusterTestBase):
@pytest.fixture()

View file

@ -1,62 +0,0 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.common import EXPIRATION_EPOCH_ATTRIBUTE
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import TestFile
@pytest.mark.nightly
@pytest.mark.grpc_api
class TestObjectTombstone(ClusterTestBase):
@pytest.fixture()
@allure.title("Change tombstone lifetime")
def tombstone_lifetime(self, cluster_state_controller: ClusterStateController, request: pytest.FixtureRequest):
config_manager = cluster_state_controller.manager(ConfigStateManager)
config_manager.set_on_all_nodes(StorageNode, {"object:delete:tombstone_lifetime": request.param}, True)
yield f"Tombstone lifetime was changed to {request.param}"
config_manager.revert_all(True)
@pytest.mark.parametrize("object_size, tombstone_lifetime", [("simple", 2)], indirect=True)
@allure.title("Tombstone object should be removed after expiration")
def test_tombstone_lifetime(
self,
new_epoch: int,
container: str,
grpc_client: GrpcClientWrapper,
test_file: TestFile,
rpc_endpoint: str,
tombstone_lifetime: str,
):
allure.dynamic.description(tombstone_lifetime)
with reporter.step("Put object"):
oid = grpc_client.object.put(test_file.path, container, rpc_endpoint)
with reporter.step("Remove object"):
tombstone_oid = grpc_client.object.delete(container, oid, rpc_endpoint)
with reporter.step("Get tombstone object lifetime"):
tombstone_info = grpc_client.object.head(container, tombstone_oid, rpc_endpoint)
tombstone_expiration_epoch = tombstone_info["header"]["attributes"][EXPIRATION_EPOCH_ATTRIBUTE]
with reporter.step("Tombstone lifetime should be <= 3"):
epochs_to_skip = int(tombstone_expiration_epoch) - new_epoch + 1
assert epochs_to_skip <= 3
with reporter.step("Wait for tombstone expiration"):
self.tick_epochs(epochs_to_skip)
with reporter.step("Tombstone should be removed after expiration"):
with pytest.raises(RuntimeError, match=OBJECT_NOT_FOUND):
grpc_client.object.head(container, tombstone_oid, rpc_endpoint)
with pytest.raises(RuntimeError, match=OBJECT_NOT_FOUND):
grpc_client.object.get(container, tombstone_oid, rpc_endpoint)
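For reference, a minimal arithmetic sketch of the "<= 3" expectation above, assuming the tombstone is created in the same epoch that the new_epoch fixture reports and tombstone_lifetime is the parametrized value of 2 (the epoch number itself is made up):

tombstone_lifetime = 2                     # set via object:delete:tombstone_lifetime
new_epoch = 100                            # hypothetical current epoch
tombstone_expiration_epoch = new_epoch + tombstone_lifetime
epochs_to_skip = tombstone_expiration_epoch - new_epoch + 1
assert epochs_to_skip == 3                 # hence the "<= 3" bound in the test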

View file

@ -7,7 +7,9 @@ from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
from frostfs_testlib.resources.error_patterns import OBJECT_IS_LOCKED
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL_F
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
@ -17,7 +19,6 @@ logger = logging.getLogger("NeoLogger")
@pytest.mark.nightly
@pytest.mark.grpc_api
@pytest.mark.grpc_without_user
class TestObjectApiWithoutUser(ClusterTestBase):
def _parse_oid(self, stdout: str) -> str:
@ -30,72 +31,96 @@ class TestObjectApiWithoutUser(ClusterTestBase):
tombstone = id_str.split(":")[1]
return tombstone.strip()
@pytest.fixture(scope="function")
def public_container(self, default_wallet: WalletInfo) -> str:
with reporter.step("Create public container"):
cid_public = create_container(
default_wallet,
self.shell,
self.cluster.default_rpc_endpoint,
basic_acl=PUBLIC_ACL_F,
)
return cid_public
@pytest.fixture(scope="class")
def cli_without_wallet(self, client_shell: Shell) -> FrostfsCli:
def frostfs_cli(self, client_shell: Shell) -> FrostfsCli:
return FrostfsCli(client_shell, FROSTFS_CLI_EXEC)
@allure.title("Get public container by native API with generate private key")
def test_get_container_with_generated_key(self, cli_without_wallet: FrostfsCli, container: str, rpc_endpoint: str):
def test_get_container_with_generated_key(self, frostfs_cli: FrostfsCli, public_container: str):
"""
Validate `container get` native API with flag `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Get container with generate key"):
with expect_not_raises():
cli_without_wallet.container.get(rpc_endpoint, container, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
frostfs_cli.container.get(rpc_endpoint, cid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
@allure.title("Get list containers by native API with generate private key")
def test_list_containers_with_generated_key(
self, cli_without_wallet: FrostfsCli, default_wallet: WalletInfo, container: str, rpc_endpoint: str
):
def test_list_containers_with_generated_key(self, frostfs_cli: FrostfsCli, default_wallet: WalletInfo, public_container: str):
"""
Validate `container list` native API with flag `--generate-key`.
"""
rpc_endpoint = self.cluster.default_rpc_endpoint
owner = default_wallet.get_address_from_json(0)
with reporter.step("List containers with generate key"):
with expect_not_raises():
result = cli_without_wallet.container.list(rpc_endpoint, owner=owner, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
result = frostfs_cli.container.list(rpc_endpoint, owner=owner, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
with reporter.step("Expect container in received containers list"):
containers = result.stdout.split()
assert container in containers
assert public_container in containers
@allure.title("Get list of public container objects by native API with generate private key")
def test_list_objects_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, rpc_endpoint: str):
def test_list_objects_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str):
"""
Validate `container list_objects` native API with flag `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("List objects with generate key"):
with expect_not_raises():
result = cli_without_wallet.container.list_objects(rpc_endpoint, container, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
result = frostfs_cli.container.list_objects(rpc_endpoint, cid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
with reporter.step("Expect empty objects list"):
objects = result.stdout.split()
assert len(objects) == 0, objects
@allure.title("Search public container nodes by native API with generate private key")
def test_search_nodes_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, rpc_endpoint: str):
def test_search_nodes_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str):
"""
Validate `container search_node` native API with flag `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Search nodes with generate key"):
with expect_not_raises():
cli_without_wallet.container.search_node(rpc_endpoint, container, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
frostfs_cli.container.search_node(rpc_endpoint, cid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
@allure.title("Put object into public container by native API with generate private key (obj_size={object_size})")
def test_put_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
def test_put_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object put` into container with public ACL and flag `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Put object with generate key"):
with expect_not_raises():
result = cli_without_wallet.object.put(
result = frostfs_cli.object.put(
rpc_endpoint,
container,
cid,
file_path,
generate_key=True,
no_progress=True,
@ -105,24 +130,26 @@ class TestObjectApiWithoutUser(ClusterTestBase):
oid = self._parse_oid(result.stdout)
with reporter.step("List objects with generate key"):
result = cli_without_wallet.container.list_objects(rpc_endpoint, container, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
result = frostfs_cli.container.list_objects(rpc_endpoint, cid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
with reporter.step("Expect object in received objects list"):
objects = result.stdout.split()
assert oid in objects, objects
@allure.title("Get public container object by native API with generate private key (obj_size={object_size})")
def test_get_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
def test_get_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object get` for container with public ACL and flag `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
expected_hash = get_file_hash(file_path)
with reporter.step("Put object with generate key"):
result = cli_without_wallet.object.put(
result = frostfs_cli.object.put(
rpc_endpoint,
container,
cid,
file_path,
generate_key=True,
no_progress=True,
@ -133,9 +160,9 @@ class TestObjectApiWithoutUser(ClusterTestBase):
with reporter.step("Get object with generate key"):
with expect_not_raises():
cli_without_wallet.object.get(
frostfs_cli.object.get(
rpc_endpoint,
container,
cid,
oid,
file=file_path,
generate_key=True,
@ -149,15 +176,18 @@ class TestObjectApiWithoutUser(ClusterTestBase):
assert expected_hash == downloaded_hash
@allure.title("Head public container object by native API with generate private key (obj_size={object_size})")
def test_head_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
def test_head_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object head` for container with public ACL and flag `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Put object with generate key"):
result = cli_without_wallet.object.put(
result = frostfs_cli.object.put(
rpc_endpoint,
container,
cid,
file_path,
generate_key=True,
no_progress=True,
@ -168,18 +198,21 @@ class TestObjectApiWithoutUser(ClusterTestBase):
with reporter.step("Head object with generate key"):
with expect_not_raises():
cli_without_wallet.object.head(rpc_endpoint, container, oid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
frostfs_cli.object.head(rpc_endpoint, cid, oid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
@allure.title("Delete public container object by native API with generate private key (obj_size={object_size})")
def test_delete_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
def test_delete_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object delete` for container with public ACL and flag `--generate key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Put object with generate key"):
result = cli_without_wallet.object.put(
result = frostfs_cli.object.put(
rpc_endpoint,
container,
cid,
file_path,
generate_key=True,
no_progress=True,
@ -190,14 +223,14 @@ class TestObjectApiWithoutUser(ClusterTestBase):
with reporter.step("Delete object with generate key"):
with expect_not_raises():
result = cli_without_wallet.object.delete(rpc_endpoint, container, oid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
result = frostfs_cli.object.delete(rpc_endpoint, cid, oid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
oid = self._parse_tombstone_oid(result.stdout)
with reporter.step("Head object with generate key"):
result = cli_without_wallet.object.head(
result = frostfs_cli.object.head(
rpc_endpoint,
container,
cid,
oid,
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
@ -208,16 +241,19 @@ class TestObjectApiWithoutUser(ClusterTestBase):
assert object_type == "TOMBSTONE", object_type
@allure.title("Lock public container object by native API with generate private key (obj_size={object_size})")
def test_lock_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
def test_lock_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object lock` for container with public ACL and flag `--generate-key`.
Attempt to delete the locked object.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Put object with generate key"):
result = cli_without_wallet.object.put(
result = frostfs_cli.object.put(
rpc_endpoint,
container,
cid,
file_path,
generate_key=True,
no_progress=True,
@ -228,9 +264,9 @@ class TestObjectApiWithoutUser(ClusterTestBase):
with reporter.step("Lock object with generate key"):
with expect_not_raises():
cli_without_wallet.object.lock(
frostfs_cli.object.lock(
rpc_endpoint,
container,
cid,
oid,
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
@ -239,24 +275,27 @@ class TestObjectApiWithoutUser(ClusterTestBase):
with reporter.step("Delete locked object with generate key and expect error"):
with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
cli_without_wallet.object.delete(
frostfs_cli.object.delete(
rpc_endpoint,
container,
cid,
oid,
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
@allure.title("Search public container objects by native API with generate private key (obj_size={object_size})")
def test_search_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
def test_search_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object search` for container with public ACL and flag `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Put object with generate key"):
result = cli_without_wallet.object.put(
result = frostfs_cli.object.put(
rpc_endpoint,
container,
cid,
file_path,
generate_key=True,
no_progress=True,
@ -267,22 +306,25 @@ class TestObjectApiWithoutUser(ClusterTestBase):
with reporter.step("Object search with generate key"):
with expect_not_raises():
result = cli_without_wallet.object.search(rpc_endpoint, container, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
result = frostfs_cli.object.search(rpc_endpoint, cid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
with reporter.step("Expect object in received objects list of container"):
object_ids = re.findall(r"(\w{43,44})", result.stdout)
assert oid in object_ids
@allure.title("Get range of public container object by native API with generate private key (obj_size={object_size})")
def test_range_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
def test_range_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object range` for container with public ACL and `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Put object with generate key"):
result = cli_without_wallet.object.put(
result = frostfs_cli.object.put(
rpc_endpoint,
container,
cid,
file_path,
generate_key=True,
no_progress=True,
@ -293,9 +335,9 @@ class TestObjectApiWithoutUser(ClusterTestBase):
with reporter.step("Get range of object with generate key"):
with expect_not_raises():
cli_without_wallet.object.range(
frostfs_cli.object.range(
rpc_endpoint,
container,
cid,
oid,
"0:10",
file=file_path,
@ -304,15 +346,18 @@ class TestObjectApiWithoutUser(ClusterTestBase):
)
@allure.title("Get hash of public container object by native API with generate private key (obj_size={object_size})")
def test_hash_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
def test_hash_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object hash` for container with public ACL and `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Put object with generate key"):
result = cli_without_wallet.object.put(
result = frostfs_cli.object.put(
rpc_endpoint,
container,
cid,
file_path,
generate_key=True,
no_progress=True,
@ -323,9 +368,9 @@ class TestObjectApiWithoutUser(ClusterTestBase):
with reporter.step("Get range hash of object with generate key"):
with expect_not_raises():
cli_without_wallet.object.hash(
frostfs_cli.object.hash(
rpc_endpoint,
container,
cid,
oid,
range="0:10",
generate_key=True,
@ -333,15 +378,18 @@ class TestObjectApiWithoutUser(ClusterTestBase):
)
@allure.title("Get public container object nodes by native API with generate private key (obj_size={object_size})")
def test_nodes_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
def test_nodes_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object nodes` for container with public ACL and `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Put object with generate key"):
result = cli_without_wallet.object.put(
result = frostfs_cli.object.put(
rpc_endpoint,
container,
cid,
file_path,
no_progress=True,
generate_key=True,
@ -353,14 +401,14 @@ class TestObjectApiWithoutUser(ClusterTestBase):
with reporter.step("Configure frostfs-cli for alive remote node"):
alive_node = self.cluster.cluster_nodes[0]
node_shell = alive_node.host.get_shell()
alive_endpoint = alive_node.storage_node.get_rpc_endpoint()
node_frostfs_cli_wo_wallet = FrostfsCli(node_shell, FROSTFS_CLI_EXEC)
rpc_endpoint = alive_node.storage_node.get_rpc_endpoint()
node_frostfs_cli = FrostfsCli(node_shell, FROSTFS_CLI_EXEC)
with reporter.step("Get object nodes with generate key"):
with expect_not_raises():
node_frostfs_cli_wo_wallet.object.nodes(
alive_endpoint,
container,
node_frostfs_cli.object.nodes(
rpc_endpoint,
cid,
oid=oid,
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,

View file

@ -22,8 +22,6 @@ from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import datetime_utils
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from ...resources.common import S3_POLICY_FILE_LOCATION
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
if "ec_policy" not in metafunc.fixturenames:
@ -41,8 +39,7 @@ def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
100: ["EC 12.4", "EC 8.4", "EC 5.3", "EC 4.4"],
}
nearest_node_count = ([4] + (list(filter(lambda x: x <= node_count, ec_map.keys()))))[-1]
metafunc.parametrize("ec_policy, node_count", ((ec_policy, node_count) for ec_policy in ec_map[nearest_node_count]))
metafunc.parametrize("ec_policy, node_count", ((ec_policy, node_count) for ec_policy in ec_map[node_count]))
@allure.title("Initialized remote FrostfsAdm")
@ -680,7 +677,7 @@ class TestECReplication(ClusterTestBase):
)
@allure.title("Create bucket with EC policy (s3_client={s3_client})")
@pytest.mark.parametrize("s3_policy, s3_client", [(S3_POLICY_FILE_LOCATION, AwsCliClient)], indirect=True)
@pytest.mark.parametrize("s3_policy, s3_client", [("pytest_tests/resources/files/policy.json", AwsCliClient)], indirect=True)
def test_create_bucket_with_ec_location(
self, s3_client: S3ClientWrapper, bucket_container_resolver: BucketContainerResolver, grpc_client: GrpcClientWrapper
) -> None:
@ -695,7 +692,7 @@ class TestECReplication(ClusterTestBase):
assert container
@allure.title("Bucket object count chunks (s3_client={s3_client}, size={object_size})")
@pytest.mark.parametrize("s3_policy, s3_client", [(S3_POLICY_FILE_LOCATION, AwsCliClient)], indirect=True)
@pytest.mark.parametrize("s3_policy, s3_client", [("pytest_tests/resources/files/policy.json", AwsCliClient)], indirect=True)
def test_count_chunks_bucket_with_ec_location(
self,
s3_client: S3ClientWrapper,

View file

@ -15,7 +15,7 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
from ....helpers.bearer_token import create_bearer_token
from pytest_tests.helpers.bearer_token import create_bearer_token
logger = logging.getLogger("NeoLogger")

View file

@ -20,7 +20,7 @@ from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from ....helpers.utility import wait_for_gc_pass_on_storage_nodes
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
OBJECT_NOT_FOUND_ERROR = "not found"

View file

@ -23,10 +23,7 @@ from frostfs_testlib.utils.file_utils import generate_file
logger = logging.getLogger("NeoLogger")
EXPIRATION_TIMESTAMP_HEADER = "__SYSTEM__EXPIRATION_TIMESTAMP"
# TODO: Deprecated. Use EXPIRATION_EPOCH_ATTRIBUTE from testlib
EXPIRATION_EPOCH_HEADER = "__SYSTEM__EXPIRATION_EPOCH"
EXPIRATION_DURATION_HEADER = "__SYSTEM__EXPIRATION_DURATION"
EXPIRATION_EXPIRATION_RFC = "__SYSTEM__EXPIRATION_RFC3339"
SYSTEM_EXPIRATION_EPOCH = "System-Expiration-Epoch"
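For orientation, the user-facing System-Expiration-* headers used in these tests correspond to the __SYSTEM__EXPIRATION_* object attributes named by the constants above; an illustrative mapping (not part of the suite):

```python
# Illustrative only: gateway header -> system attribute, inferred from the constants above.
HEADER_TO_ATTRIBUTE = {
    "System-Expiration-Epoch": "__SYSTEM__EXPIRATION_EPOCH",
    "System-Expiration-Duration": "__SYSTEM__EXPIRATION_DURATION",
    "System-Expiration-Timestamp": "__SYSTEM__EXPIRATION_TIMESTAMP",
    "System-Expiration-RFC3339": "__SYSTEM__EXPIRATION_RFC3339",
}
```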
@ -154,7 +151,9 @@ class Test_http_system_header(ClusterTestBase):
def test_unable_put_negative_duration(self, user_container: str, simple_object_size: ObjectSize):
headers = attr_into_str_header_curl({"System-Expiration-Duration": "-1h"})
file_path = generate_file(simple_object_size.value)
with reporter.step("Put object using HTTP with attribute System-Expiration-Duration where duration is negative"):
with reporter.step(
"Put object using HTTP with attribute System-Expiration-Duration where duration is negative"
):
upload_via_http_gate_curl(
cid=user_container,
filepath=file_path,
@ -167,7 +166,9 @@ class Test_http_system_header(ClusterTestBase):
def test_unable_put_expired_timestamp(self, user_container: str, simple_object_size: ObjectSize):
headers = attr_into_str_header_curl({"System-Expiration-Timestamp": "1635075727"})
file_path = generate_file(simple_object_size.value)
with reporter.step("Put object using HTTP with attribute System-Expiration-Timestamp where duration is in the past"):
with reporter.step(
"Put object using HTTP with attribute System-Expiration-Timestamp where duration is in the past"
):
upload_via_http_gate_curl(
cid=user_container,
filepath=file_path,
@ -176,7 +177,9 @@ class Test_http_system_header(ClusterTestBase):
error_pattern=f"{EXPIRATION_TIMESTAMP_HEADER} must be in the future",
)
@allure.title("[NEGATIVE] Put object using HTTP with attribute System-Expiration-RFC3339 where duration is in the past")
@allure.title(
"[NEGATIVE] Put object using HTTP with attribute System-Expiration-RFC3339 where duration is in the past"
)
def test_unable_put_expired_rfc(self, user_container: str, simple_object_size: ObjectSize):
headers = attr_into_str_header_curl({"System-Expiration-RFC3339": "2021-11-22T09:55:49Z"})
file_path = generate_file(simple_object_size.value)
@ -201,7 +204,9 @@ class Test_http_system_header(ClusterTestBase):
with reporter.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
oid, head_info = self.oid_header_info_for_object(file_path=file_path, attributes=attributes, user_container=user_container)
oid, head_info = self.oid_header_info_for_object(
file_path=file_path, attributes=attributes, user_container=user_container
)
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
with reporter.step("Check that object becomes unavailable when epoch is expired"):
for _ in range(0, epoch_count + 1):
@ -238,7 +243,9 @@ class Test_http_system_header(ClusterTestBase):
with reporter.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
oid, head_info = self.oid_header_info_for_object(file_path=file_path, attributes=attributes, user_container=user_container)
oid, head_info = self.oid_header_info_for_object(
file_path=file_path, attributes=attributes, user_container=user_container
)
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
with reporter.step("Check that object becomes unavailable when epoch is expired"):
for _ in range(0, epoch_count + 1):
@ -269,13 +276,17 @@ class Test_http_system_header(ClusterTestBase):
)
attributes = {
SYSTEM_EXPIRATION_TIMESTAMP: self.epoch_count_into_timestamp(epoch_duration=epoch_duration, epoch=2),
SYSTEM_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(epoch_duration=epoch_duration, epoch=1, rfc3339=True),
SYSTEM_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(
epoch_duration=epoch_duration, epoch=1, rfc3339=True
),
}
file_path = generate_file(object_size.value)
with reporter.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
oid, head_info = self.oid_header_info_for_object(file_path=file_path, attributes=attributes, user_container=user_container)
oid, head_info = self.oid_header_info_for_object(
file_path=file_path, attributes=attributes, user_container=user_container
)
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
with reporter.step("Check that object becomes unavailable when epoch is expired"):
for _ in range(0, epoch_count + 1):
@ -303,14 +314,20 @@ class Test_http_system_header(ClusterTestBase):
["simple"],
indirect=True,
)
def test_http_rfc_object_unavailable_after_expir(self, user_container: str, object_size: ObjectSize, epoch_duration: int):
def test_http_rfc_object_unavailable_after_expir(
self, user_container: str, object_size: ObjectSize, epoch_duration: int
):
self.tick_epoch()
epoch_count = 2
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
logger.info(
f"epoch duration={epoch_duration}, current_epoch= {get_epoch(self.shell, self.cluster)} expected_epoch {expected_epoch}"
)
attributes = {SYSTEM_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(epoch_duration=epoch_duration, epoch=2, rfc3339=True)}
attributes = {
SYSTEM_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(
epoch_duration=epoch_duration, epoch=2, rfc3339=True
)
}
file_path = generate_file(object_size.value)
with reporter.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"

View file

@ -1,4 +1,3 @@
import string
from datetime import datetime, timedelta
import allure
@ -7,15 +6,8 @@ from frostfs_testlib import reporter
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils import string_utils
from frostfs_testlib.utils.file_utils import generate_file
VALID_SYMBOLS_WITHOUT_DOT = string.ascii_lowercase + string.digits + "-"
VALID_AND_INVALID_SYMBOLS = string.ascii_letters + string.punctuation
# TODO: The dot symbol is temporarily not supported.
VALID_SYMBOLS_WITH_DOT = VALID_SYMBOLS_WITHOUT_DOT + "."
@pytest.mark.nightly
@pytest.mark.s3_gate
@ -148,87 +140,3 @@ class TestS3GateBucket:
s3_client.delete_bucket(bucket)
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_client.head_bucket(bucket)
@allure.title("Create bucket with valid name length (s3_client={s3_client}, length={length})")
@pytest.mark.parametrize("length", [3, 4, 32, 62, 63])
def test_s3_create_bucket_with_valid_length(self, s3_client: S3ClientWrapper, length: int):
bucket_name = string_utils.random_string(length, VALID_SYMBOLS_WITHOUT_DOT)
while not (bucket_name[0].isalnum() and bucket_name[-1].isalnum()):
bucket_name = string_utils.random_string(length, VALID_SYMBOLS_WITHOUT_DOT)
with reporter.step("Create bucket with valid name length"):
s3_client.create_bucket(bucket_name)
with reporter.step("Check bucket name in buckets"):
assert bucket_name in s3_client.list_buckets()
@allure.title("[NEGATIVE] Bucket with invalid name length should not be created (s3_client={s3_client}, length={length})")
@pytest.mark.parametrize("length", [2, 64, 254, 255, 256])
def test_s3_create_bucket_with_invalid_length(self, s3_client: S3ClientWrapper, length: int):
bucket_name = string_utils.random_string(length, VALID_SYMBOLS_WITHOUT_DOT)
while not (bucket_name[0].isalnum() and bucket_name[-1].isalnum()):
bucket_name = string_utils.random_string(length, VALID_SYMBOLS_WITHOUT_DOT)
with reporter.step("Create bucket with invalid name length and catch exception"):
with pytest.raises(Exception, match=".*(?:InvalidBucketName|Invalid bucket name).*"):
s3_client.create_bucket(bucket_name)
@allure.title("[NEGATIVE] Bucket with invalid name should not be created (s3_client={s3_client}, bucket_name={bucket_name})")
@pytest.mark.parametrize(
"bucket_name",
[
"BUCKET-1",
"buckeT-2",
# The following case for AWS CLI is not handled correctly
# "-bucket-3",
"bucket-4-",
".bucket-5",
"bucket-6.",
"bucket..7",
"bucket+8",
"bucket_9",
"bucket 10",
"127.10.5.11",
"xn--bucket-12",
"bucket-13-s3alias",
# The following names can be used in FrostFS but are prohibited by the AWS specification.
# "sthree-bucket-14"
# "sthree-configurator-bucket-15"
# "amzn-s3-demo-bucket-16"
# "sthree-bucket-17"
# "bucket-18--ol-s3"
# "bucket-19--x-s3"
# "bucket-20.mrap"
],
)
def test_s3_create_bucket_with_invalid_name(self, s3_client: S3ClientWrapper, bucket_name: str):
with reporter.step("Create bucket with invalid name and catch exception"):
with pytest.raises(Exception, match=".*(?:InvalidBucketName|Invalid bucket name).*"):
s3_client.create_bucket(bucket_name)
@allure.title("[NEGATIVE] Delete non-empty bucket (s3_client={s3_client})")
def test_s3_check_availability_non_empty_bucket_after_deleting(
self,
bucket: str,
simple_object_size: ObjectSize,
s3_client: S3ClientWrapper,
):
object_path = generate_file(simple_object_size.value)
object_name = s3_helper.object_key_from_file_path(object_path)
with reporter.step("Put object into bucket"):
s3_client.put_object(bucket, object_path)
with reporter.step("Check that object appears in bucket"):
objects = s3_client.list_objects(bucket)
assert objects, f"Expected bucket with object, got empty {objects}"
assert object_name in objects, f"Object {object_name} not found in bucket object list {objects}"
with reporter.step("Try to delete not empty bucket and get error"):
with pytest.raises(Exception, match=r".*The bucket you tried to delete is not empty.*"):
s3_client.delete_bucket(bucket)
with reporter.step("Check bucket availability"):
objects = s3_client.list_objects(bucket)
assert objects, f"Expected bucket with object, got empty {objects}"
assert object_name in objects, f"Object {object_name} not found in bucket object list {objects}"
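The removed length tests above regenerate a candidate name until both edge characters are alphanumeric; a self-contained sketch of that loop, with string_utils.random_string swapped for random.choices purely for illustration:

```python
import random
import string

VALID_SYMBOLS_WITHOUT_DOT = string.ascii_lowercase + string.digits + "-"

def random_bucket_name(length: int) -> str:
    # S3 bucket names must start and end with a letter or digit, so re-draw until they do.
    name = "".join(random.choices(VALID_SYMBOLS_WITHOUT_DOT, k=length))
    while not (name[0].isalnum() and name[-1].isalnum()):
        name = "".join(random.choices(VALID_SYMBOLS_WITHOUT_DOT, k=length))
    return name
```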

View file

@ -1,4 +1,5 @@
import json
import os
import allure
import pytest
@ -14,12 +15,10 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import generate_file
from ....resources.common import S3_POLICY_FILE_LOCATION
@pytest.mark.nightly
@pytest.mark.s3_gate
@pytest.mark.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], indirect=True)
@pytest.mark.parametrize("s3_policy", ["pytest_tests/resources/files/policy.json"], indirect=True)
class TestS3GatePolicy(ClusterTestBase):
@allure.title("Bucket creation with retention policy applied (s3_client={s3_client})")
def test_s3_bucket_location(
@ -101,6 +100,7 @@ class TestS3GatePolicy(ClusterTestBase):
s3_client.get_bucket_policy(bucket)
with reporter.step("Put new policy"):
custom_policy = f"file://{os.getcwd()}/pytest_tests/resources/files/bucket_policy.json"
custom_policy = {
"Version": "2012-10-17",
"Id": "aaaa-bbbb-cccc-dddd",

View file

@ -2,6 +2,7 @@ allure-pytest==2.13.2
allure-python-commons==2.13.2
base58==2.1.0
boto3==1.35.30
botocore==1.19.33
configobj==5.0.6
neo-mamba==1.0.0
pexpect==4.8.0