Compare commits

..

5 commits

85 changed files with 3872 additions and 5736 deletions

.devenv.hosting.yaml (new file, +108)

@@ -0,0 +1,108 @@
hosts:
  - address: localhost
    attributes:
      sudo_shell: false
    plugin_name: docker
    healthcheck_plugin_name: basic
    attributes:
      skip_readiness_check: True
      force_transactions: True
    services:
      - name: frostfs-storage_01
        attributes:
          container_name: s01
          config_path: /etc/frostfs/storage/config.yml
          wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
          local_wallet_config_path: ./TemporaryDir/empty-password.yml
          local_wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
          wallet_password: ""
          volume_name: storage_storage_s01
          endpoint_data0: s01.frostfs.devenv:8080
          control_endpoint: s01.frostfs.devenv:8081
          un_locode: "RU MOW"
      - name: frostfs-storage_02
        attributes:
          container_name: s02
          config_path: /etc/frostfs/storage/config.yml
          wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
          local_wallet_config_path: ./TemporaryDir/empty-password.yml
          local_wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
          wallet_password: ""
          volume_name: storage_storage_s02
          endpoint_data0: s02.frostfs.devenv:8080
          control_endpoint: s02.frostfs.devenv:8081
          un_locode: "RU LED"
      - name: frostfs-storage_03
        attributes:
          container_name: s03
          config_path: /etc/frostfs/storage/config.yml
          wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
          local_wallet_config_path: ./TemporaryDir/empty-password.yml
          local_wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
          wallet_password: ""
          volume_name: storage_storage_s03
          endpoint_data0: s03.frostfs.devenv:8080
          control_endpoint: s03.frostfs.devenv:8081
          un_locode: "SE STO"
      - name: frostfs-storage_04
        attributes:
          container_name: s04
          config_path: /etc/frostfs/storage/config.yml
          wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
          local_wallet_config_path: ./TemporaryDir/empty-password.yml
          local_wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
          wallet_password: ""
          volume_name: storage_storage_s04
          endpoint_data0: s04.frostfs.devenv:8080
          control_endpoint: s04.frostfs.devenv:8081
          un_locode: "FI HEL"
      - name: frostfs-s3_01
        attributes:
          container_name: s3_gate
          config_path: ../frostfs-dev-env/services/s3_gate/.s3.env
          wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json
          local_wallet_config_path: ./TemporaryDir/password-s3.yml
          local_wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json
          wallet_password: "s3"
          endpoint_data0: https://s3.frostfs.devenv:8080
      - name: frostfs-http_01
        attributes:
          container_name: http_gate
          config_path: ../frostfs-dev-env/services/http_gate/.http.env
          wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json
          local_wallet_config_path: ./TemporaryDir/password-other.yml
          local_wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json
          wallet_password: "one"
          endpoint_data0: http://http.frostfs.devenv
      - name: frostfs-ir_01
        attributes:
          container_name: ir01
          config_path: ../frostfs-dev-env/services/ir/.ir.env
          wallet_path: ../frostfs-dev-env/services/ir/az.json
          local_wallet_config_path: ./TemporaryDir/password-other.yml
          local_wallet_path: ../frostfs-dev-env/services/ir/az.json
          wallet_password: "one"
      - name: neo-go_01
        attributes:
          container_name: morph_chain
          config_path: ../frostfs-dev-env/services/morph_chain/protocol.privnet.yml
          wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json
          local_wallet_config_path: ./TemporaryDir/password-other.yml
          local_wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json
          wallet_password: "one"
          endpoint_internal0: http://morph-chain.frostfs.devenv:30333
      - name: main-chain_01
        attributes:
          container_name: main_chain
          config_path: ../frostfs-dev-env/services/chain/protocol.privnet.yml
          wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json
          local_wallet_config_path: ./TemporaryDir/password-other.yml
          local_wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json
          wallet_password: "one"
          endpoint_internal0: http://main-chain.frostfs.devenv:30333
      - name: coredns_01
        attributes:
          container_name: coredns
    clis:
      - name: frostfs-cli
        exec_path: frostfs-cli
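
For orientation (not part of the diff): a config like this is consumed by frostfs-testlib's hosting layer. A minimal sketch, assuming the Hosting API from frostfs-testlib and the default file name introduced via HOSTING_CONFIG_FILE further down in this compare:

import yaml

from frostfs_testlib.hosting import Hosting

# Load the devenv hosting description shown above.
with open(".devenv.hosting.yaml") as file:
    hosting_config = yaml.safe_load(file)

# Wire up the hosts, services and CLIs for the test framework.
hosting = Hosting()
hosting.configure(hosting_config)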

.github/CODEOWNERS (new vendored file, +1)

@@ -0,0 +1 @@
* @vdomnich-yadro @dansingjulia @yadro-vavdeev @alexchetaev @abereziny

.gitignore (vendored, 1 line removed)

@@ -18,7 +18,6 @@ xunit_results.xml
 # ignore caches under any path
 **/__pycache__
 **/.pytest_cache
-*.egg-info

 # ignore work directories and setup files
 .setup

.pre-commit-config.yaml

@@ -9,16 +9,6 @@ repos:
     hooks:
       - id: isort
         name: isort (python)
-  - repo: https://git.frostfs.info/TrueCloudLab/allure-validator
-    rev: 1.1.0
-    hooks:
-      - id: allure-validator
-        args: [
-          "pytest_tests/",
-          "--plugins",
-          "frostfs[-_]testlib*",
-        ]
-        pass_filenames: false

 ci:
   autofix_prs: false

CODEOWNERS (deleted)

@@ -1 +0,0 @@
* @JuliaKovshova @abereziny @d.zayakin @anikeev-yadro @anurindm @ylukoyan @i.niyazov

pyproject.toml

@@ -1,8 +1,8 @@
 [tool.isort]
 profile = "black"
 src_paths = ["pytest_tests"]
-line_length = 140
+line_length = 120

 [tool.black]
-line-length = 140
+line-length = 120
 target-version = ["py310"]

pytest.ini

@@ -11,17 +11,13 @@ markers =
     sanity: test runs in sanity testrun
     smoke: test runs in smoke testrun
     # controlling markers
-    order: manual control of test order
-    logs_after_session: Make the last test in session
-    # parametrizing markers
-    container: specify container details for container creation
+    no_healthcheck: skip healthcheck for this test
     # functional markers
     maintenance: tests for change mode node
     container: tests for container creation
     grpc_api: standard gRPC API tests
     grpc_control: tests related to using frostfs-cli control commands
     grpc_object_lock: gRPC lock tests
-    grpc_without_user: gRPC without user tests
     http_gate: HTTP gate contract
     http_put: HTTP gate test cases with PUT call
     s3_gate: All S3 gate tests
@@ -48,6 +44,7 @@ markers =
     failover_network: tests for network failure
     failover_reboot: tests for system recovery after reboot of a node
     interfaces: tests down interface to system
+    add_nodes: add nodes to cluster
     check_binaries: check frostfs installed binaries versions
     payments: tests for payment associated operations
     load: performance tests
@@ -67,7 +64,6 @@ markers =
     write_cache_loss: tests for write cache loss
     time: time tests
     replication: replication tests
-    ec_replication: replication EC
     static_session_container: tests for a static session in a container
     shard: shard management tests
-    session_logs: check logs messages
+    logs_after_session: tests after a session with logs
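
For illustration (not part of the diff): a marker registered here is opted into per test, so the no_healthcheck marker on the new side would be applied like this (test name hypothetical):

import pytest

@pytest.mark.no_healthcheck
def test_restart_node_without_healthcheck():
    ...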

pytest_tests/__init__.py (deleted)

@@ -1,3 +0,0 @@
import os

TESTS_BASE_PATH = os.path.dirname(os.path.relpath(__file__))

pytest_tests/helpers/bearer_token.py (deleted)

@@ -1,17 +0,0 @@
import os

from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.utils import string_utils


def create_bearer_token(frostfs_cli: FrostfsCli, directory: str, cid: str, rule: ape.Rule, endpoint: str) -> str:
    chain_file = os.path.join(directory, string_utils.unique_name("chain-", ".json"))
    bearer_token_file = os.path.join(directory, string_utils.unique_name("bt-", ".json"))
    signed_bearer_token_file = os.path.join(directory, string_utils.unique_name("bt-sign-", ".json"))

    frostfs_cli.bearer.generate_ape_override(rule.chain_id, rule=rule.as_string(), cid=cid, output=chain_file)
    frostfs_cli.bearer.create(endpoint, bearer_token_file, issued_at=1, expire_at=9999, ape=chain_file)

    frostfs_cli.util.sign_bearer_token(bearer_token_file, signed_bearer_token_file)
    return signed_bearer_token_file
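
For illustration (not part of the diff): a hypothetical call to this helper, with `frostfs_cli`, `temp_directory`, `cid` and `endpoint` assumed to come from fixtures:

# Allow every object operation for the OTHERS role, then issue and sign the token.
rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.get_all(), ape.Condition.by_role(ape.Role.OTHERS))
bearer_token = create_bearer_token(frostfs_cli, temp_directory, cid, rule, endpoint)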

pytest_tests/helpers/container_access.py

@@ -1,12 +1,11 @@
-import functools
-from typing import Optional
+from typing import List, Optional

 from frostfs_testlib.shell import Shell
 from frostfs_testlib.storage.cluster import Cluster
-from frostfs_testlib.storage.dataclasses import ape
+from frostfs_testlib.storage.dataclasses.acl import EACLOperation
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo

-from ..helpers.object_access import (
+from pytest_tests.helpers.object_access import (
     can_delete_object,
     can_get_head_object,
     can_get_object,
@@ -16,15 +15,8 @@ from ..helpers.object_access import (
     can_search_object,
 )

-ALL_OBJECT_OPERATIONS = ape.ObjectOperations.get_all()
-
-FULL_ACCESS = {op: True for op in ALL_OBJECT_OPERATIONS}
-NO_ACCESS = {op: False for op in ALL_OBJECT_OPERATIONS}
-RO_ACCESS = {op: True if op not in [ape.ObjectOperations.PUT, ape.ObjectOperations.DELETE] else False for op in ALL_OBJECT_OPERATIONS}

-def assert_access_to_container(
-    access_matrix: dict[ape.ObjectOperations, bool],
+def check_full_access_to_container(
     wallet: WalletInfo,
     cid: str,
     oid: str,
@@ -35,23 +27,95 @@ def assert_access_to_container(
     xhdr: Optional[dict] = None,
 ):
     endpoint = cluster.default_rpc_endpoint
-    results: dict = {}
-
-    results[ape.ObjectOperations.PUT] = can_put_object(wallet, cid, file_name, shell, cluster, bearer, xhdr)
-    results[ape.ObjectOperations.HEAD] = can_get_head_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
-    results[ape.ObjectOperations.GET_RANGE] = can_get_range_of_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
-    results[ape.ObjectOperations.GET_RANGE_HASH] = can_get_range_hash_of_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
-    results[ape.ObjectOperations.SEARCH] = can_search_object(wallet, cid, shell, endpoint, oid, bearer, xhdr)
-    results[ape.ObjectOperations.GET] = can_get_object(wallet, cid, oid, file_name, shell, cluster, bearer, xhdr)
-    results[ape.ObjectOperations.DELETE] = can_delete_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
-
-    failed_checks = [
-        f"allowed {action} failed" for action, success in results.items() if not success and access_matrix[action] != results[action]
-    ] + [f"denied {action} succeeded" for action, success in results.items() if success and access_matrix[action] != results[action]]
-
-    assert not failed_checks, ", ".join(failed_checks)
-
-
-assert_full_access_to_container = functools.partial(assert_access_to_container, FULL_ACCESS)
-assert_no_access_to_container = functools.partial(assert_access_to_container, NO_ACCESS)
-assert_read_only_container = functools.partial(assert_access_to_container, RO_ACCESS)
+    assert can_put_object(wallet, cid, file_name, shell, cluster, bearer, xhdr)
+    assert can_get_head_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
+    assert can_get_range_of_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
+    assert can_get_range_hash_of_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
+    assert can_search_object(wallet, cid, shell, endpoint, oid, bearer, xhdr)
+    assert can_get_object(wallet, cid, oid, file_name, shell, cluster, bearer, xhdr)
+    assert can_delete_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
+
+
+def check_no_access_to_container(
+    wallet: WalletInfo,
+    cid: str,
+    oid: str,
+    file_name: str,
+    shell: Shell,
+    cluster: Cluster,
+    bearer: Optional[str] = None,
+    xhdr: Optional[dict] = None,
+):
+    endpoint = cluster.default_rpc_endpoint
+    assert not can_put_object(wallet, cid, file_name, shell, cluster, bearer, xhdr)
+    assert not can_get_head_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
+    assert not can_get_range_of_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
+    assert not can_get_range_hash_of_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
+    assert not can_search_object(wallet, cid, shell, endpoint, oid, bearer, xhdr)
+    assert not can_get_object(wallet, cid, oid, file_name, shell, cluster, bearer, xhdr)
+    assert not can_delete_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
+
+
+def check_custom_access_to_container(
+    wallet: WalletInfo,
+    cid: str,
+    oid: str,
+    file_name: str,
+    shell: Shell,
+    cluster: Cluster,
+    deny_operations: Optional[List[EACLOperation]] = None,
+    ignore_operations: Optional[List[EACLOperation]] = None,
+    bearer: Optional[str] = None,
+    xhdr: Optional[dict] = None,
+):
+    endpoint = cluster.default_rpc_endpoint
+    deny_operations = [op.value for op in deny_operations or []]
+    ignore_operations = [op.value for op in ignore_operations or []]
+    checks: dict = {}
+    if EACLOperation.PUT.value not in ignore_operations:
+        checks[EACLOperation.PUT.value] = can_put_object(wallet, cid, file_name, shell, cluster, bearer, xhdr)
+    if EACLOperation.HEAD.value not in ignore_operations:
+        checks[EACLOperation.HEAD.value] = can_get_head_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
+    if EACLOperation.GET_RANGE.value not in ignore_operations:
+        checks[EACLOperation.GET_RANGE.value] = can_get_range_of_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
+    if EACLOperation.GET_RANGE_HASH.value not in ignore_operations:
+        checks[EACLOperation.GET_RANGE_HASH.value] = can_get_range_hash_of_object(
+            wallet, cid, oid, shell, endpoint, bearer, xhdr
+        )
+    if EACLOperation.SEARCH.value not in ignore_operations:
+        checks[EACLOperation.SEARCH.value] = can_search_object(wallet, cid, shell, endpoint, oid, bearer, xhdr)
+    if EACLOperation.GET.value not in ignore_operations:
+        checks[EACLOperation.GET.value] = can_get_object(wallet, cid, oid, file_name, shell, cluster, bearer, xhdr)
+    if EACLOperation.DELETE.value not in ignore_operations:
+        checks[EACLOperation.DELETE.value] = can_delete_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
+
+    failed_checks = [
+        f"allowed {action} failed"
+        for action, success in checks.items()
+        if not success and action not in deny_operations
+    ] + [f"denied {action} succeeded" for action, success in checks.items() if success and action in deny_operations]
+
+    assert not failed_checks, ", ".join(failed_checks)
+
+
+def check_read_only_container(
+    wallet: WalletInfo,
+    cid: str,
+    oid: str,
+    file_name: str,
+    shell: Shell,
+    cluster: Cluster,
+    bearer: Optional[str] = None,
+    xhdr: Optional[dict] = None,
+):
+    return check_custom_access_to_container(
+        wallet,
+        cid,
+        oid,
+        file_name,
+        deny_operations=[EACLOperation.PUT, EACLOperation.DELETE],
+        bearer=bearer,
+        xhdr=xhdr,
+        shell=shell,
+        cluster=cluster,
+    )
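
For illustration (not part of the diff): check_custom_access_to_container on the new side lets a test expect partial access; the fixture names below are hypothetical:

# Expect every operation except DELETE to succeed, and skip probing PUT entirely.
check_custom_access_to_container(
    other_wallet,
    cid,
    oid,
    file_name,
    shell,
    cluster,
    deny_operations=[EACLOperation.DELETE],
    ignore_operations=[EACLOperation.PUT],
)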

pytest_tests/helpers/container_spec.py (deleted)

@@ -1,23 +0,0 @@
from dataclasses import dataclass

from frostfs_testlib.steps.cli.container import DEFAULT_PLACEMENT_RULE
from frostfs_testlib.storage.cluster import Cluster


@dataclass
class ContainerSpec:
    rule: str = DEFAULT_PLACEMENT_RULE
    basic_acl: str = None
    allow_owner_via_ape: bool = False

    def parsed_rule(self, cluster: Cluster):
        if self.rule is None:
            return None

        substitutions = {"%NODE_COUNT%": str(len(cluster.cluster_nodes))}

        parsed_rule = self.rule
        for sub, replacement in substitutions.items():
            parsed_rule = parsed_rule.replace(sub, replacement)

        return parsed_rule
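
For illustration (not part of the diff): parsed_rule expands the %NODE_COUNT% placeholder against the cluster size, so on a hypothetical 4-node cluster:

spec = ContainerSpec(rule="REP %NODE_COUNT% IN X CBF 1 SELECT %NODE_COUNT% FROM * AS X")
# spec.parsed_rule(cluster) == "REP 4 IN X CBF 1 SELECT 4 FROM * AS X"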

pytest_tests/helpers/object_access.py

@@ -43,7 +43,9 @@ def can_get_object(
             cluster=cluster,
         )
     except OPERATION_ERROR_TYPE as err:
-        assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
+        assert string_utils.is_str_match_pattern(
+            err, OBJECT_ACCESS_DENIED
+        ), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
         return False
     assert get_file_hash(file_name) == get_file_hash(got_file_path)
     return True
@@ -72,7 +74,9 @@ def can_put_object(
             cluster=cluster,
         )
     except OPERATION_ERROR_TYPE as err:
-        assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
+        assert string_utils.is_str_match_pattern(
+            err, OBJECT_ACCESS_DENIED
+        ), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
         return False

     return True
@@ -98,7 +102,9 @@ def can_delete_object(
             endpoint=endpoint,
         )
     except OPERATION_ERROR_TYPE as err:
-        assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
+        assert string_utils.is_str_match_pattern(
+            err, OBJECT_ACCESS_DENIED
+        ), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
         return False

     return True
@@ -126,7 +132,9 @@ def can_get_head_object(
             timeout=timeout,
         )
     except OPERATION_ERROR_TYPE as err:
-        assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
+        assert string_utils.is_str_match_pattern(
+            err, OBJECT_ACCESS_DENIED
+        ), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
         return False

     return True
@@ -155,7 +163,9 @@ def can_get_range_of_object(
             timeout=timeout,
         )
     except OPERATION_ERROR_TYPE as err:
-        assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
+        assert string_utils.is_str_match_pattern(
+            err, OBJECT_ACCESS_DENIED
+        ), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
         return False

     return True
@@ -184,7 +194,9 @@ def can_get_range_hash_of_object(
             timeout=timeout,
         )
     except OPERATION_ERROR_TYPE as err:
-        assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
+        assert string_utils.is_str_match_pattern(
+            err, OBJECT_ACCESS_DENIED
+        ), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
         return False

     return True
@@ -211,7 +223,9 @@ def can_search_object(
             timeout=timeout,
         )
     except OPERATION_ERROR_TYPE as err:
-        assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
+        assert string_utils.is_str_match_pattern(
+            err, OBJECT_ACCESS_DENIED
+        ), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
         return False
     if oid:
         return oid in oids

View file

@@ -35,16 +35,3 @@ def wait_for_gc_pass_on_storage_nodes() -> None:
     wait_time = datetime_utils.parse_time(STORAGE_GC_TIME)
     with reporter.step(f"Wait {wait_time}s until GC completes on storage nodes"):
         time.sleep(wait_time)
-
-
-def are_numbers_similar(num1, num2, tolerance_percentage: float = 1.0):
-    """
-    if difference of numbers is less than permissible deviation than numbers are similar
-    """
-    # Calculate the permissible deviation
-    average = (num1 + num2) / 2
-    tolerance = average * (tolerance_percentage / 100)
-
-    # Calculate the real difference
-    difference = abs(num1 - num2)
-
-    return difference <= tolerance
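
For illustration (not part of the diff): with the default 1% tolerance, 100 and 101 are similar (average 100.5 gives a tolerance of 1.005 and the difference is 1), while 100 and 103 are not:

assert are_numbers_similar(100, 101)
assert not are_numbers_similar(100, 103)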

pytest_tests/resources/common.py

@@ -1,8 +1,6 @@
 import os

-from .. import TESTS_BASE_PATH

 TEST_CYCLES_COUNT = int(os.getenv("TEST_CYCLES_COUNT", "1"))

 DEVENV_PATH = os.getenv("DEVENV_PATH", os.path.join("..", "frostfs-dev-env"))
-S3_POLICY_FILE_LOCATION = os.path.join(TESTS_BASE_PATH, "resources/files/policy.json")
+HOSTING_CONFIG_FILE = os.getenv("HOSTING_CONFIG_FILE", ".devenv.hosting.yaml")

pytest_tests/resources/files/policy.json

@@ -1,6 +1,4 @@
 {
     "rep-3": "REP 3",
-    "rep-1": "REP 1",
-    "complex": "REP 1 IN X CBF 1 SELECT 1 FROM * AS X",
-    "ec3.1": "EC 3.1 CBF 1 SELECT 4 FROM *"
+    "complex": "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
 }
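
For illustration (not part of the diff): tests resolve short policy aliases through this map; a sketch assuming the file path from S3_POLICY_FILE_LOCATION above:

import json

with open("pytest_tests/resources/files/policy.json") as file:
    policies = json.load(file)

assert policies["complex"] == "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"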

View file

@@ -1,105 +0,0 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.wellknown_acl import PRIVATE_ACL_F, PUBLIC_ACL_F, READONLY_ACL_F
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase

from ....helpers.container_access import assert_full_access_to_container, assert_no_access_to_container, assert_read_only_container
from ....helpers.container_spec import ContainerSpec


@pytest.mark.nightly
@pytest.mark.sanity
@pytest.mark.acl
class TestACLBasic(ClusterTestBase):
    @allure.title("Operations in public container available to everyone (obj_size={object_size})")
    @pytest.mark.container(ContainerSpec(basic_acl=PUBLIC_ACL_F))
    def test_basic_acl_public(
        self,
        default_wallet: WalletInfo,
        other_wallet: WalletInfo,
        client_shell: Shell,
        container: str,
        file_path: str,
        cluster: Cluster,
    ):
        """
        Test access to object operations in public container.
        """
        for wallet, role in ((default_wallet, "owner"), (other_wallet, "others")):
            with reporter.step("Put objects to container"):
                # We create new objects for each wallet because assert_full_access_to_container
                # deletes the object
                owner_object_oid = put_object_to_random_node(
                    default_wallet,
                    file_path,
                    container,
                    shell=self.shell,
                    cluster=self.cluster,
                    attributes={"created": "owner"},
                )
                other_object_oid = put_object_to_random_node(
                    other_wallet,
                    file_path,
                    container,
                    shell=self.shell,
                    cluster=self.cluster,
                    attributes={"created": "other"},
                )

            with reporter.step(f"Check {role} has full access to public container"):
                assert_full_access_to_container(wallet, container, owner_object_oid, file_path, client_shell, cluster)
                assert_full_access_to_container(wallet, container, other_object_oid, file_path, client_shell, cluster)

    @allure.title("Operations in private container only available to owner (obj_size={object_size})")
    @pytest.mark.container(ContainerSpec(basic_acl=PRIVATE_ACL_F))
    def test_basic_acl_private(
        self,
        default_wallet: WalletInfo,
        other_wallet: WalletInfo,
        client_shell: Shell,
        container: str,
        file_path: str,
        cluster: Cluster,
    ):
        """
        Test access to object operations in private container.
        """
        with reporter.step("Put object to container"):
            owner_object_oid = put_object_to_random_node(default_wallet, file_path, container, client_shell, cluster)

        with reporter.step("Check no one except owner has access to operations with container"):
            assert_no_access_to_container(other_wallet, container, owner_object_oid, file_path, client_shell, cluster)

        with reporter.step("Check owner has full access to private container"):
            assert_full_access_to_container(default_wallet, container, owner_object_oid, file_path, self.shell, cluster)

    @allure.title("Read operations in readonly container available to others (obj_size={object_size})")
    @pytest.mark.container(ContainerSpec(basic_acl=READONLY_ACL_F))
    def test_basic_acl_readonly(
        self,
        default_wallet: WalletInfo,
        other_wallet: WalletInfo,
        client_shell: Shell,
        container: str,
        file_path: str,
        cluster: Cluster,
    ):
        """
        Test access to object operations in readonly container.
        """
        with reporter.step("Put object to container"):
            object_oid = put_object_to_random_node(default_wallet, file_path, container, client_shell, cluster)

        with reporter.step("Check others has read-only access to operations with container"):
            assert_read_only_container(other_wallet, container, object_oid, file_path, client_shell, cluster)

        with reporter.step("Check owner has full access to public container"):
            assert_full_access_to_container(default_wallet, container, object_oid, file_path, client_shell, cluster)

View file

@@ -1,228 +0,0 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.steps.node_management import drop_object
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils import wallet_utils
from frostfs_testlib.utils.failover_utils import wait_object_replication
from frostfs_testlib.utils.file_utils import TestFile

from ....helpers.container_access import (
    ALL_OBJECT_OPERATIONS,
    assert_access_to_container,
    assert_full_access_to_container,
    assert_no_access_to_container,
)
from ....helpers.container_spec import ContainerSpec


@pytest.fixture
def denied_wallet(default_wallet: WalletInfo, other_wallet: WalletInfo, role: ape.Role) -> WalletInfo:
    return other_wallet if role == ape.Role.OTHERS else default_wallet


@pytest.fixture
def allowed_wallet(default_wallet: WalletInfo, other_wallet: WalletInfo, role: ape.Role) -> WalletInfo:
    return default_wallet if role == ape.Role.OTHERS else other_wallet


@pytest.mark.nightly
@pytest.mark.ape
class TestApeContainer(ClusterTestBase):
    @pytest.mark.sanity
    @allure.title("Deny operations via APE by role (role={role}, obj_size={object_size})")
    @pytest.mark.parametrize("role", [ape.Role.OWNER, ape.Role.OTHERS], indirect=True)
    def test_deny_operations_via_ape_by_role(
        self,
        denied_wallet: WalletInfo,
        allowed_wallet: WalletInfo,
        frostfs_cli: FrostfsCli,
        container: str,
        objects: list[str],
        role: ape.Role,
        file_path: TestFile,
        rpc_endpoint: str,
    ):
        with reporter.step(f"Deny all operations for {role} via APE"):
            deny_rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, ape.Condition.by_role(role.value))
            frostfs_cli.ape_manager.add(
                rpc_endpoint, deny_rule.chain_id, target_name=container, target_type="container", rule=deny_rule.as_string()
            )

        with reporter.step("Wait for one block"):
            self.wait_for_blocks()

        with reporter.step(f"Assert denied role have no access to public container"):
            # access checks will try to remove object, so we use .pop() to ensure we have object before deletion
            assert_no_access_to_container(denied_wallet, container, objects.pop(), file_path, self.shell, self.cluster)

        with reporter.step(f"Assert allowed role have full access to public container"):
            assert_full_access_to_container(allowed_wallet, container, objects.pop(), file_path, self.shell, self.cluster)

        with reporter.step(f"Remove deny rule from APE"):
            frostfs_cli.ape_manager.remove(rpc_endpoint, deny_rule.chain_id, target_name=container, target_type="container")

        with reporter.step("Wait for one block"):
            self.wait_for_blocks()

        with reporter.step("Assert allowed role have full access to public container"):
            assert_full_access_to_container(allowed_wallet, container, objects.pop(), file_path, self.shell, self.cluster)

        with reporter.step("Assert denied role have full access to public container"):
            assert_full_access_to_container(denied_wallet, container, objects.pop(), file_path, self.shell, self.cluster)

    @allure.title("Deny operations for others via APE excluding single pubkey (obj_size={object_size})")
    def test_deny_opeartions_excluding_pubkey(
        self,
        frostfs_cli: FrostfsCli,
        default_wallet: WalletInfo,
        other_wallet: WalletInfo,
        other_wallet_2: WalletInfo,
        container: str,
        objects: list[str],
        rpc_endpoint: str,
        file_path: TestFile,
    ):
        with reporter.step("Add deny APE rules for others except single wallet"):
            rule_conditions = [
                ape.Condition.by_role(ape.Role.OTHERS),
                ape.Condition.by_key(
                    wallet_utils.get_wallet_public_key(other_wallet_2.path, other_wallet_2.password),
                    match_type=ape.MatchType.NOT_EQUAL,
                ),
            ]
            rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, rule_conditions)
            frostfs_cli.ape_manager.add(rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string())

        with reporter.step("Wait for one block"):
            self.wait_for_blocks()

        with reporter.step("Assert others have no access to public container"):
            # access checks will try to remove object, so we use .pop() to ensure we have object before deletion
            assert_no_access_to_container(other_wallet, container, objects[0], file_path, self.shell, self.cluster)

        with reporter.step("Assert owner have full access to public container"):
            assert_full_access_to_container(default_wallet, container, objects.pop(), file_path, self.shell, self.cluster)

        with reporter.step("Assert allowed wallet have full access to public container"):
            assert_full_access_to_container(other_wallet_2, container, objects.pop(), file_path, self.shell, self.cluster)

    @allure.title("Replication works with APE deny rules on OWNER and OTHERS (obj_size={object_size})")
    @pytest.mark.container(ContainerSpec(f"REP %NODE_COUNT% IN X CBF 1 SELECT %NODE_COUNT% FROM * AS X", PUBLIC_ACL))
    def test_replication_works_with_deny_rules(
        self,
        default_wallet: WalletInfo,
        frostfs_cli: FrostfsCli,
        container: str,
        rpc_endpoint: str,
        file_path: TestFile,
    ):
        with reporter.step("Put object to container"):
            oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)

        with reporter.step("Wait for object replication after upload"):
            wait_object_replication(container, oid, len(self.cluster.cluster_nodes), self.shell, self.cluster.storage_nodes)

        with reporter.step("Add deny APE rules for owner and others"):
            rule_conditions = [
                ape.Condition.by_role(ape.Role.OWNER),
                ape.Condition.by_role(ape.Role.OTHERS),
            ]
            for rule_condition in rule_conditions:
                rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, rule_condition)
                frostfs_cli.ape_manager.add(
                    rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string()
                )

        with reporter.step("Wait for one block"):
            self.wait_for_blocks()

        with reporter.step("Drop object"):
            drop_object(self.cluster.storage_nodes[0], container, oid)

        with reporter.step("Wait for dropped object to be replicated"):
            wait_object_replication(container, oid, len(self.cluster.storage_nodes), self.shell, self.cluster.storage_nodes)

    @allure.title("Deny operations via APE by role (role=ir, obj_size={object_size})")
    def test_deny_operations_via_ape_by_role_ir(
        self, frostfs_cli: FrostfsCli, ir_wallet: WalletInfo, container: str, objects: list[str], rpc_endpoint: str, file_path: TestFile
    ):
        default_ir_access = {
            ape.ObjectOperations.PUT: False,
            ape.ObjectOperations.GET: True,
            ape.ObjectOperations.HEAD: True,
            ape.ObjectOperations.GET_RANGE: False,
            ape.ObjectOperations.GET_RANGE_HASH: True,
            ape.ObjectOperations.SEARCH: True,
            ape.ObjectOperations.DELETE: False,
        }

        with reporter.step("Assert IR wallet access in default state"):
            assert_access_to_container(default_ir_access, ir_wallet, container, objects[0], file_path, self.shell, self.cluster)

        with reporter.step("Add deny APE rule with deny all operations for IR role"):
            rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, [ape.Condition.by_role(ape.Role.IR.value)])
            frostfs_cli.ape_manager.add(rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string())

        with reporter.step("Wait for one block"):
            self.wait_for_blocks()

        with reporter.step("Assert IR wallet ignores APE rules"):
            assert_access_to_container(default_ir_access, ir_wallet, container, objects[0], file_path, self.shell, self.cluster)

        with reporter.step("Remove APE rule"):
            frostfs_cli.ape_manager.remove(rpc_endpoint, rule.chain_id, target_name=container, target_type="container")

        with reporter.step("Wait for one block"):
            self.wait_for_blocks()

        with reporter.step("Assert IR wallet access is restored"):
            assert_access_to_container(default_ir_access, ir_wallet, container, objects[0], file_path, self.shell, self.cluster)

    @allure.title("Deny operations via APE by role (role=container, obj_size={object_size})")
    def test_deny_operations_via_ape_by_role_container(
        self,
        frostfs_cli: FrostfsCli,
        container_node_wallet: WalletInfo,
        container: str,
        objects: list[str],
        rpc_endpoint: str,
        file_path: TestFile,
    ):
        access_matrix = {
            ape.ObjectOperations.PUT: True,
            ape.ObjectOperations.GET: True,
            ape.ObjectOperations.HEAD: True,
            ape.ObjectOperations.GET_RANGE: False,
            ape.ObjectOperations.GET_RANGE_HASH: True,
            ape.ObjectOperations.SEARCH: True,
            ape.ObjectOperations.DELETE: False,
        }

        with reporter.step("Assert CONTAINER wallet access in default state"):
            assert_access_to_container(access_matrix, container_node_wallet, container, objects[0], file_path, self.shell, self.cluster)

        rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, ape.Condition.by_role(ape.Role.CONTAINER.value))

        with reporter.step(f"Add APE rule with deny all operations for CONTAINER and IR roles"):
            frostfs_cli.ape_manager.add(rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string())

        with reporter.step("Wait for one block"):
            self.wait_for_blocks()

        with reporter.step("Assert CONTAINER wallet ignores APE rule"):
            assert_access_to_container(access_matrix, container_node_wallet, container, objects[0], file_path, self.shell, self.cluster)

        with reporter.step("Remove APE rule"):
            frostfs_cli.ape_manager.remove(rpc_endpoint, rule.chain_id, target_name=container, target_type="container")

        with reporter.step("Wait for one block"):
            self.wait_for_blocks()

        with reporter.step("Assert CONTAINER wallet access after rule was removed"):
            assert_access_to_container(access_matrix, container_node_wallet, container, objects[0], file_path, self.shell, self.cluster)

View file

@@ -1,398 +0,0 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.error_patterns import OBJECT_ACCESS_DENIED
from frostfs_testlib.steps.cli.object import get_object_from_random_node, head_object, put_object_to_random_node
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import TestFile

from ....helpers.bearer_token import create_bearer_token
from ....helpers.container_access import (
    ALL_OBJECT_OPERATIONS,
    FULL_ACCESS,
    assert_access_to_container,
    assert_full_access_to_container,
    assert_no_access_to_container,
)
from ....helpers.container_spec import ContainerSpec
from ....helpers.object_access import OBJECT_ACCESS_DENIED


@pytest.mark.nightly
@pytest.mark.ape
class TestApeFilters(ClusterTestBase):
    # SPEC: https://github.com/nspcc-dev/neofs-spec/blob/master/01-arch/07-acl.md
    HEADER = {"check_key": "check_value"}
    OTHER_HEADER = {"check_key": "other_value"}
    ATTRIBUTES = {
        "key_one": "check_value",
        "x_key": "xvalue",
        "check_key": "check_value",
    }
    OTHER_ATTRIBUTES = {
        "key_one": "check_value",
        "x_key": "other_value",
        "check_key": "other_value",
    }
    OBJECT_COUNT = 5
    RESOURCE_OPERATIONS = [
        ape.ObjectOperations.GET,
        ape.ObjectOperations.HEAD,
        ape.ObjectOperations.PUT,
    ]

    @pytest.fixture
    def objects_with_attributes(self, default_wallet: WalletInfo, file_path: TestFile, container: str):
        return [
            put_object_to_random_node(
                default_wallet, file_path, container, self.shell, self.cluster, attributes={**self.ATTRIBUTES, "key": val}
            )
            for val in range(self.OBJECT_COUNT)
        ]

    @pytest.fixture
    def objects_with_other_attributes(self, default_wallet: WalletInfo, file_path: TestFile, container: str):
        return [
            put_object_to_random_node(
                default_wallet, file_path, container, self.shell, self.cluster, attributes={**self.OTHER_ATTRIBUTES, "key": val}
            )
            for val in range(self.OBJECT_COUNT)
        ]

    @pytest.fixture
    def objects_without_attributes(self, default_wallet: WalletInfo, file_path: TestFile, container: str):
        return [put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster) for _ in range(self.OBJECT_COUNT)]

    @pytest.mark.sanity
    @allure.title("Operations with request filter (match_type={match_type}, obj_size={object_size})")
    @pytest.mark.parametrize("match_type", [ape.MatchType.EQUAL, ape.MatchType.NOT_EQUAL])
    @pytest.mark.skip("https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/1243")
    def test_ape_filters_request(
        self,
        frostfs_cli: FrostfsCli,
        temp_directory: str,
        other_wallet: WalletInfo,
        container: str,
        objects_with_attributes: list[str],
        objects_with_other_attributes: list[str],
        objects_without_attributes: list[str],
        match_type: ape.MatchType,
        file_path: TestFile,
        rpc_endpoint: str,
    ):
        with reporter.step("Deny all operations for others via APE with request condition"):
            request_condition = ape.Condition('"frostfs:xheader/check_key"', '"check_value"', ape.ConditionType.REQUEST, match_type)
            role_condition = ape.Condition.by_role(ape.Role.OTHERS)
            deny_rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, [request_condition, role_condition])

            frostfs_cli.ape_manager.add(
                rpc_endpoint, deny_rule.chain_id, target_name=container, target_type="container", rule=deny_rule.as_string()
            )

        with reporter.step("Wait for one block"):
            self.wait_for_blocks()

        with reporter.step("Create bearer token with everything allowed for others role"):
            role_condition = ape.Condition.by_role(ape.Role.OTHERS)
            rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, role_condition)
            bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, rpc_endpoint)

        # Filter denies requests where "check_key {match_type} ATTRIBUTE", so when match_type
        # is STRING_EQUAL, then requests with "check_key=OTHER_ATTRIBUTE" will be allowed while
        # requests with "check_key=ATTRIBUTE" will be denied, and vice versa
        allow_headers = self.OTHER_HEADER if match_type == ape.MatchType.EQUAL else self.HEADER
        deny_headers = self.HEADER if match_type == ape.MatchType.EQUAL else self.OTHER_HEADER

        # We test on 3 groups of objects with various headers,
        # but APE rule should ignore object headers and only work based on request headers
        for oids in [objects_with_attributes, objects_with_other_attributes, objects_without_attributes]:
            with reporter.step("Check others has full access when sending request without headers"):
                assert_full_access_to_container(other_wallet, container, oids.pop(), file_path, self.shell, self.cluster)

            with reporter.step("Check others has full access when sending request with allowed headers"):
                assert_full_access_to_container(
                    other_wallet, container, oids.pop(), file_path, self.shell, self.cluster, xhdr=allow_headers
                )

            with reporter.step("Check others has no access when sending request with denied headers"):
                assert_no_access_to_container(other_wallet, container, oids.pop(), file_path, self.shell, self.cluster, xhdr=deny_headers)

            with reporter.step("Check others has full access when sending request with denied headers and using bearer token"):
                assert_full_access_to_container(
                    other_wallet, container, oids.pop(), file_path, self.shell, self.cluster, bearer, deny_headers
                )

    @allure.title("Operations with deny user headers filter (match_type={match_type}, obj_size={object_size})")
    @pytest.mark.parametrize("match_type", [ape.MatchType.EQUAL, ape.MatchType.NOT_EQUAL])
    @pytest.mark.skip("https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/1300")
    def test_ape_deny_filters_object(
        self,
        frostfs_cli: FrostfsCli,
        temp_directory: str,
        other_wallet: WalletInfo,
        container: str,
        objects_with_attributes: list[str],
        objects_with_other_attributes: list[str],
        objects_without_attributes: list[str],
        match_type: ape.MatchType,
        rpc_endpoint: str,
        file_path: TestFile,
    ):
        allow_objects = objects_with_other_attributes if match_type == ape.MatchType.EQUAL else objects_with_attributes
        deny_objects = objects_with_attributes if match_type == ape.MatchType.EQUAL else objects_with_other_attributes

        # When there is no attribute on the object, it's the same as "", and "" is not equal to "<some_value>"
        # So it's the same as deny_objects
        no_attributes_access = {
            ape.MatchType.EQUAL: FULL_ACCESS,
            ape.MatchType.NOT_EQUAL: {
                ape.ObjectOperations.PUT: False,
                ape.ObjectOperations.GET: False,
                ape.ObjectOperations.HEAD: False,
                ape.ObjectOperations.GET_RANGE: True,
                ape.ObjectOperations.GET_RANGE_HASH: True,
                ape.ObjectOperations.SEARCH: True,
                ape.ObjectOperations.DELETE: False,  # Denied by restricted PUT
            },
        }
        allowed_access = {
            ape.MatchType.EQUAL: FULL_ACCESS,
            ape.MatchType.NOT_EQUAL: {
                ape.ObjectOperations.PUT: False,  # because currently we are put without attributes
                ape.ObjectOperations.GET: True,
                ape.ObjectOperations.HEAD: True,
                ape.ObjectOperations.GET_RANGE: True,
                ape.ObjectOperations.GET_RANGE_HASH: True,
                ape.ObjectOperations.SEARCH: True,
                ape.ObjectOperations.DELETE: False,  # Because delete needs to put a tombstone without attributes
            },
        }

        with reporter.step("Deny operations for others via APE with resource condition"):
            resource_condition = ape.Condition('"check_key"', '"check_value"', ape.ConditionType.RESOURCE, match_type)
            role_condition = ape.Condition.by_role(ape.Role.OTHERS)
            deny_rule = ape.Rule(ape.Verb.DENY, self.RESOURCE_OPERATIONS, [resource_condition, role_condition])

            frostfs_cli.ape_manager.add(
                rpc_endpoint, deny_rule.chain_id, target_name=container, target_type="container", rule=deny_rule.as_string()
            )

        with reporter.step("Wait for one block"):
            self.wait_for_blocks()

        with reporter.step("Create bearer token with everything allowed for others role"):
            role_condition = ape.Condition.by_role(ape.Role.OTHERS)
            rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, role_condition)
            bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, rpc_endpoint)

        with reporter.step("Create bearer token with allowed put for others role"):
            role_condition = ape.Condition.by_role(ape.Role.OTHERS)
            rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.PUT, role_condition)
            bearer_put = create_bearer_token(frostfs_cli, temp_directory, container, rule, rpc_endpoint)

        # We will attempt requests with various headers,
        # but APE rule should ignore request headers and validate only object headers
        for xhdr in (self.HEADER, self.OTHER_HEADER, None):
            with reporter.step("Check others access to objects without attributes"):
                assert_access_to_container(
                    no_attributes_access[match_type],
                    other_wallet,
                    container,
                    objects_without_attributes.pop(),
                    file_path,
                    self.shell,
                    self.cluster,
                    xhdr=xhdr,
                )

            with reporter.step("Check others have full access to objects without deny attribute"):
                assert_access_to_container(
                    allowed_access[match_type], other_wallet, container, allow_objects.pop(), file_path, self.shell, self.cluster, xhdr=xhdr
                )

            with reporter.step("Check others have no access to objects with deny attribute"):
                with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
                    head_object(other_wallet, container, deny_objects[0], self.shell, rpc_endpoint, xhdr=xhdr)

                with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
                    get_object_from_random_node(other_wallet, container, deny_objects[0], self.shell, self.cluster, xhdr=xhdr)

            with reporter.step("Check others have access to objects with deny attribute and using bearer token"):
                assert_full_access_to_container(
                    other_wallet, container, deny_objects.pop(), file_path, self.shell, self.cluster, bearer, xhdr
                )

        allow_attribute = self.OTHER_HEADER if match_type == ape.MatchType.EQUAL else self.HEADER
        with reporter.step("Check others can PUT objects without denied attribute"):
            put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, attributes=allow_attribute)

        deny_attribute = self.HEADER if match_type == ape.MatchType.EQUAL else self.OTHER_HEADER
        with reporter.step("Check others can not PUT objects with denied attribute"):
            with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
                put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, attributes=deny_attribute)

        with reporter.step("Check others can PUT objects with denied attribute and using bearer token"):
            put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, bearer_put, attributes=deny_attribute)

    @allure.title("Operations with allow APE rule with resource filters (match_type={match_type}, obj_size={object_size})")
    @pytest.mark.parametrize("match_type", [ape.MatchType.EQUAL, ape.MatchType.NOT_EQUAL])
    @pytest.mark.parametrize("object_size", ["simple"], indirect=True)
    @pytest.mark.container(ContainerSpec(basic_acl="0", allow_owner_via_ape=True))
    def test_ape_allow_filters_object(
        self,
        frostfs_cli: FrostfsCli,
        other_wallet: WalletInfo,
        container: str,
        objects_with_attributes: list[str],
        objects_with_other_attributes: list[str],
        objects_without_attributes: list[str],
        match_type: ape.MatchType,
        rpc_endpoint: str,
        file_path: TestFile,
        temp_directory: str,
    ):
        if match_type == ape.MatchType.EQUAL:
            allow_objects = objects_with_attributes
            deny_objects = objects_with_other_attributes
            allow_attribute = self.HEADER
            deny_attribute = self.OTHER_HEADER
            no_attributes_match_context = pytest.raises(Exception, match=OBJECT_ACCESS_DENIED)
        else:
            allow_objects = objects_with_other_attributes
            deny_objects = objects_with_attributes
            allow_attribute = self.OTHER_HEADER
            deny_attribute = self.HEADER
            no_attributes_match_context = expect_not_raises()

        with reporter.step("Allow operations for others except few operations by resource condition via APE"):
            resource_condition = ape.Condition('"check_key"', '"check_value"', ape.ConditionType.RESOURCE, match_type)
            role_condition = ape.Condition.by_role(ape.Role.OTHERS)
            deny_rule = ape.Rule(ape.Verb.ALLOW, self.RESOURCE_OPERATIONS, [resource_condition, role_condition])

            frostfs_cli.ape_manager.add(
                rpc_endpoint, deny_rule.chain_id, target_name=container, target_type="container", rule=deny_rule.as_string()
            )

        with reporter.step("Wait for one block"):
            self.wait_for_blocks()

        with reporter.step("Check GET, PUT and HEAD operations with objects without attributes for OTHERS role"):
            oid = objects_without_attributes.pop()
            with no_attributes_match_context:
                assert head_object(other_wallet, container, oid, self.shell, rpc_endpoint)

            with no_attributes_match_context:
                assert get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster)

            with no_attributes_match_context:
                assert put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster)

        with reporter.step("Create bearer token with everything allowed for others role"):
            role_condition = ape.Condition.by_role(ape.Role.OTHERS)
            rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, role_condition)
            bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, rpc_endpoint)

        with reporter.step("Check others can get and put objects without attributes and using bearer token"):
            oid = objects_without_attributes[0]
            with expect_not_raises():
                head_object(other_wallet, container, oid, self.shell, rpc_endpoint, bearer)

            with expect_not_raises():
                get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster, bearer)

            with expect_not_raises():
                put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, bearer)

        with reporter.step("Check others can get and put objects with attributes matching the filter"):
            oid = allow_objects.pop()
            with expect_not_raises():
                head_object(other_wallet, container, oid, self.shell, rpc_endpoint)

            with expect_not_raises():
                get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster)

            with expect_not_raises():
                put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, attributes=allow_attribute)

        with reporter.step("Check others cannot get and put objects without attributes matching the filter"):
            oid = deny_objects[0]
            with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
                head_object(other_wallet, container, oid, self.shell, rpc_endpoint)

            with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
                assert get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster)

            with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
                assert put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, attributes=deny_attribute)

        with reporter.step("Check others can get and put objects without attributes matching the filter with bearer token"):
            oid = deny_objects.pop()
            with expect_not_raises():
                head_object(other_wallet, container, oid, self.shell, rpc_endpoint, bearer)

            with expect_not_raises():
                get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster, bearer)

            with expect_not_raises():
                put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, bearer, attributes=allow_attribute)

    @allure.title("PUT and GET object using bearer with objectID in filter (obj_size={object_size}, match_type=NOT_EQUAL)")
    @pytest.mark.container(ContainerSpec(basic_acl="0", allow_owner_via_ape=True))
    def test_ape_filter_object_id_not_equals(
        self,
        frostfs_cli: FrostfsCli,
        default_wallet: WalletInfo,
        other_wallet: WalletInfo,
        container: str,
        temp_directory: str,
        file_path: TestFile,
    ):
        with reporter.step("Put object to container"):
            oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)

        with reporter.step("Create bearer token with objectID filter"):
            role_condition = ape.Condition.by_role(ape.Role.OTHERS)
            object_condition = ape.Condition.by_object_id(oid, ape.ConditionType.RESOURCE, ape.MatchType.NOT_EQUAL)
            rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, [role_condition, object_condition])
            bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, self.cluster.default_rpc_endpoint)

        with reporter.step("Others should be able to put object using bearer token"):
            with expect_not_raises():
                put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, bearer)

        with reporter.step("Others should not be able to get object matching the filter"):
            with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
                get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster, bearer)

    @allure.title("PUT and GET object using bearer with objectID in filter (obj_size={object_size}, match_type=EQUAL)")
    @pytest.mark.container(ContainerSpec(basic_acl="0", allow_owner_via_ape=True))
    def test_ape_filter_object_id_equals(
        self,
        frostfs_cli: FrostfsCli,
        default_wallet: WalletInfo,
        other_wallet: WalletInfo,
        container: str,
        temp_directory: str,
        file_path: TestFile,
    ):
        with reporter.step("Put object to container"):
            oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)

        with reporter.step("Create bearer token with objectID filter"):
            role_condition = ape.Condition.by_role(ape.Role.OTHERS)
            object_condition = ape.Condition.by_object_id(oid, ape.ConditionType.RESOURCE, ape.MatchType.EQUAL)
            rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, [role_condition, object_condition])
            bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, self.cluster.default_rpc_endpoint)

        with reporter.step("Others should not be able to put object using bearer token"):
            with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
                put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, bearer)

        with reporter.step("Others should be able to get object matching the filter"):
            with expect_not_raises():
                get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster, bearer)

View file

@ -1,194 +0,0 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import TestFile
from ....helpers.bearer_token import create_bearer_token
from ....helpers.container_access import (
ALL_OBJECT_OPERATIONS,
assert_access_to_container,
assert_full_access_to_container,
assert_no_access_to_container,
)
@pytest.mark.nightly
@pytest.mark.sanity
@pytest.mark.bearer
@pytest.mark.ape
class TestApeBearer(ClusterTestBase):
@allure.title("Operations with BearerToken (role={role}, obj_size={object_size})")
@pytest.mark.parametrize("role", [ape.Role.OWNER, ape.Role.OTHERS], indirect=True)
def test_bearer_token_operations(
self,
container: str,
objects: list[str],
frostfs_cli: FrostfsCli,
temp_directory: str,
test_wallet: WalletInfo,
role: ape.Role,
file_path: TestFile,
rpc_endpoint: str,
):
with reporter.step(f"Check {role} has full access to container without bearer token"):
assert_full_access_to_container(test_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
with reporter.step(f"Deny all operations for everyone via APE"):
rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS)
frostfs_cli.ape_manager.add(rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string())
with reporter.step("Wait for one block"):
self.wait_for_blocks()
with reporter.step(f"Create bearer token with all operations allowed"):
bearer = create_bearer_token(
frostfs_cli,
temp_directory,
container,
rule=ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS),
endpoint=rpc_endpoint,
)
with reporter.step(f"Check {role} without token has no access to all operations with container"):
assert_no_access_to_container(test_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
with reporter.step(f"Check {role} with token has access to all operations with container"):
assert_full_access_to_container(test_wallet, container, objects.pop(), file_path, self.shell, self.cluster, bearer)
with reporter.step(f"Remove deny rule from APE"):
frostfs_cli.ape_manager.remove(rpc_endpoint, rule.chain_id, target_name=container, target_type="container")
with reporter.step("Wait for one block"):
self.wait_for_blocks()
with reporter.step(f"Check {role} without token has access to all operations with container"):
assert_full_access_to_container(test_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
@allure.title("BearerToken for compound operations (obj_size={object_size})")
def test_bearer_token_compound_operations(
self,
frostfs_cli: FrostfsCli,
temp_directory: str,
default_wallet: WalletInfo,
other_wallet: WalletInfo,
container: tuple[str, list[str], str],
objects: list[str],
rpc_endpoint: str,
file_path: TestFile,
):
"""
Bearer Token COMPLETLY overrides chains set for the specific target.
Thus, any restictions or permissions should be explicitly defined in BT.
"""
wallets_map = {
ape.Role.OWNER: default_wallet,
ape.Role.OTHERS: other_wallet,
}
access_map = {
ape.Role.OWNER: {
ape.ObjectOperations.PUT: True,
ape.ObjectOperations.GET: True,
ape.ObjectOperations.HEAD: True,
ape.ObjectOperations.GET_RANGE: True,
ape.ObjectOperations.GET_RANGE_HASH: True,
ape.ObjectOperations.SEARCH: True,
ape.ObjectOperations.DELETE: False,
},
ape.Role.OTHERS: {
ape.ObjectOperations.PUT: True,
ape.ObjectOperations.GET: True,
ape.ObjectOperations.HEAD: True,
ape.ObjectOperations.GET_RANGE: False,
ape.ObjectOperations.GET_RANGE_HASH: False,
ape.ObjectOperations.SEARCH: False,
ape.ObjectOperations.DELETE: True,
},
}
bt_access_map = {
ape.Role.OWNER: {
ape.ObjectOperations.PUT: True,
ape.ObjectOperations.GET: True,
ape.ObjectOperations.HEAD: True,
ape.ObjectOperations.GET_RANGE: True,
ape.ObjectOperations.GET_RANGE_HASH: True,
ape.ObjectOperations.SEARCH: True,
ape.ObjectOperations.DELETE: True,
},
ape.Role.OTHERS: {
ape.ObjectOperations.PUT: True,
ape.ObjectOperations.GET: False,
ape.ObjectOperations.HEAD: True,
ape.ObjectOperations.GET_RANGE: False,
ape.ObjectOperations.GET_RANGE_HASH: False,
# Although SEARCH is denied by the APE chain defined in Policy contract,
# Bearer Token COMPLETLY overrides chains set for the specific target.
# Thus, any restictions or permissions should be explicitly defined in BT.
ape.ObjectOperations.SEARCH: True,
ape.ObjectOperations.DELETE: True,
},
}
# Operations that we will deny for each role via APE
deny_map = {
ape.Role.OWNER: [ape.ObjectOperations.DELETE],
ape.Role.OTHERS: [
ape.ObjectOperations.SEARCH,
ape.ObjectOperations.GET_RANGE_HASH,
ape.ObjectOperations.GET_RANGE,
],
}
# Operations that we will allow for each role with bearer token
bearer_map = {
ape.Role.OWNER: [
ape.ObjectOperations.DELETE,
ape.ObjectOperations.PUT,
ape.ObjectOperations.GET_RANGE,
],
ape.Role.OTHERS: [
ape.ObjectOperations.GET,
ape.ObjectOperations.GET_RANGE,
ape.ObjectOperations.GET_RANGE_HASH,
],
}
conditions_map = {
ape.Role.OWNER: ape.Condition.by_role(ape.Role.OWNER),
ape.Role.OTHERS: ape.Condition.by_role(ape.Role.OTHERS),
}
verb_map = {ape.Role.OWNER: ape.Verb.ALLOW, ape.Role.OTHERS: ape.Verb.DENY}
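        # The owner's token explicitly allows its operations, while the token for others explicitly denies them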
for role, operations in deny_map.items():
with reporter.step(f"Add APE deny rule for {role}"):
rule = ape.Rule(ape.Verb.DENY, operations, conditions_map[role])
frostfs_cli.ape_manager.add(
rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string()
)
with reporter.step("Wait for one block"):
self.wait_for_blocks()
for role, wallet in wallets_map.items():
with reporter.step(f"Assert access to container without bearer token for {role}"):
assert_access_to_container(access_map[role], wallet, container, objects.pop(), file_path, self.shell, self.cluster)
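        # Issue one bearer token per role from the verb, operation and condition maps above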
bearer_tokens = {}
for role in wallets_map.keys():
with reporter.step(f"Create bearer token for {role}"):
rule = ape.Rule(verb_map[role], bearer_map[role], conditions_map[role])
bt = create_bearer_token(frostfs_cli, temp_directory, container, rule, rpc_endpoint)
bearer_tokens[role] = bt
for role, wallet in wallets_map.items():
with reporter.step(f"Assert access to container with bearer token for {role}"):
assert_access_to_container(
bt_access_map[role], wallet, container, objects.pop(), file_path, self.shell, self.cluster, bearer_tokens[role]
)

View file

@@ -1,155 +0,0 @@
import json
import time
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.parallel import parallel
from frostfs_testlib.utils import datetime_utils
from ...helpers.container_spec import ContainerSpec
OBJECT_COUNT = 5
@pytest.fixture(scope="session")
def ir_wallet(cluster: Cluster) -> WalletInfo:
return WalletInfo.from_node(cluster.ir_nodes[0])
@pytest.fixture(scope="session")
def storage_wallet(cluster: Cluster) -> WalletInfo:
return WalletInfo.from_node(cluster.storage_nodes[0])
@pytest.fixture(scope="session")
def role(request: pytest.FixtureRequest):
return request.param
@pytest.fixture(scope="session")
def test_wallet(default_wallet: WalletInfo, other_wallet: WalletInfo, role: ape.Role):
role_to_wallet_map = {
ape.Role.OWNER: default_wallet,
ape.Role.OTHERS: other_wallet,
}
    assert role in role_to_wallet_map, f"Missing wallet with role {role}"
return role_to_wallet_map[role]
@pytest.fixture
def container(
default_wallet: WalletInfo,
frostfs_cli: FrostfsCli,
client_shell: Shell,
cluster: Cluster,
request: pytest.FixtureRequest,
rpc_endpoint: str,
) -> str:
container_spec = _get_container_spec(request)
cid = _create_container_by_spec(default_wallet, client_shell, cluster, rpc_endpoint, container_spec)
if container_spec.allow_owner_via_ape:
_allow_owner_via_ape(frostfs_cli, cluster, cid)
return cid
def _create_container_by_spec(
default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, rpc_endpoint: str, container_spec: ContainerSpec
) -> str:
# TODO: add container spec to step message
with reporter.step("Create container"):
cid = create_container(
default_wallet, client_shell, rpc_endpoint, basic_acl=container_spec.basic_acl, rule=container_spec.parsed_rule(cluster)
)
with reporter.step("Search nodes holding the container"):
container_holder_nodes = search_nodes_with_container(default_wallet, cid, client_shell, cluster.default_rpc_endpoint, cluster)
report_data = {node.id: node.host_ip for node in container_holder_nodes}
reporter.attach(json.dumps(report_data, indent=2), "container_nodes.json")
return cid
def _get_container_spec(request: pytest.FixtureRequest) -> ContainerSpec:
container_marker = request.node.get_closest_marker("container")
    # Let the default container be public for now
container_spec = ContainerSpec(basic_acl=PUBLIC_ACL)
if container_marker:
if len(container_marker.args) != 1:
raise RuntimeError(f"Something wrong with container marker: {container_marker}")
container_spec = container_marker.args[0]
if "param" in request.__dict__:
container_spec = request.param
if not container_spec:
raise RuntimeError(
f"""Container specification is empty.
Either add @pytest.mark.container(ContainerSpec(...)) or
@pytest.mark.parametrize(\"container\", [ContainerSpec(...)], indirect=True) decorator"""
)
return container_spec
def _allow_owner_via_ape(frostfs_cli: FrostfsCli, cluster: Cluster, container: str):
with reporter.step("Create allow APE rule for container owner"):
role_condition = ape.Condition.by_role(ape.Role.OWNER)
        allow_rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL, role_condition)
        frostfs_cli.ape_manager.add(
            cluster.default_rpc_endpoint,
            allow_rule.chain_id,
            target_name=container,
            target_type="container",
            rule=allow_rule.as_string(),
        )
)
with reporter.step("Wait for one block"):
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
@pytest.fixture
def objects(container: str, default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, file_path: str):
with reporter.step("Add test objects to container"):
put_results = parallel(
[put_object_to_random_node] * OBJECT_COUNT,
wallet=default_wallet,
path=file_path,
cid=container,
shell=client_shell,
cluster=cluster,
)
objects_oids = [put_result.result() for put_result in put_results]
return objects_oids
@pytest.fixture
def container_nodes(default_wallet: WalletInfo, container: str, client_shell: Shell, cluster: Cluster) -> list[ClusterNode]:
cid = container
container_holder_nodes = search_nodes_with_container(default_wallet, cid, client_shell, cluster.default_rpc_endpoint, cluster)
report_data = {node.id: node.host_ip for node in container_holder_nodes}
reporter.attach(json.dumps(report_data, indent=2), "container_nodes.json")
return container_holder_nodes
@pytest.fixture
def container_node_wallet(container_nodes: list[ClusterNode]) -> WalletInfo:
return WalletInfo.from_node(container_nodes[0].storage_node)

View file

@@ -0,0 +1,97 @@
import os
from dataclasses import dataclass
from datetime import datetime
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.credentials.interfaces import CredentialsProvider, User
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.acl import EACLRole
from frostfs_testlib.storage.dataclasses.frostfs_services import InnerRing, StorageNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.utils.file_utils import generate_file
OBJECT_COUNT = 5
@dataclass
class Wallets:
wallets: dict[EACLRole, list[WalletInfo]]
def get_wallet(self, role: EACLRole = EACLRole.USER) -> WalletInfo:
return self.wallets[role][0]
def get_wallets_list(self, role: EACLRole = EACLRole.USER) -> list[WalletInfo]:
return self.wallets[role]
@pytest.fixture(scope="module")
def wallets(default_wallet: WalletInfo, credentials_provider: CredentialsProvider, cluster: Cluster) -> Wallets:
other_wallets: list = []
for _ in range(2):
user = User(f"user_{hex(int(datetime.now().timestamp() * 1000000))}")
other_wallets.append(credentials_provider.GRPC.provide(user, cluster.cluster_nodes[0]))
ir_node: InnerRing = cluster.ir_nodes[0]
storage_node: StorageNode = cluster.storage_nodes[0]
wallets_collection = Wallets(
wallets={
EACLRole.USER: [default_wallet],
EACLRole.OTHERS: other_wallets,
EACLRole.SYSTEM: [
WalletInfo.from_node(ir_node),
WalletInfo.from_node(storage_node),
],
}
)
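    # Attach wallet files for USER and OTHERS roles to the report; SYSTEM wallets belong to the nodes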
for role, wallets in wallets_collection.wallets.items():
if role == EACLRole.SYSTEM:
continue
for wallet in wallets:
reporter.attach(wallet.path, os.path.basename(wallet.path))
return wallets_collection
@pytest.fixture()
def file_path(object_size: ObjectSize) -> str:
yield generate_file(object_size.value)
@pytest.fixture(scope="function")
def eacl_container_with_objects(
wallets: Wallets, client_shell: Shell, cluster: Cluster, file_path: str
) -> tuple[str, list[str], str]:
user_wallet = wallets.get_wallet()
with reporter.step("Create eACL public container"):
cid = create_container(
user_wallet,
basic_acl=PUBLIC_ACL,
shell=client_shell,
endpoint=cluster.default_rpc_endpoint,
)
with reporter.step("Add test objects to container"):
objects_oids = [
put_object_to_random_node(
user_wallet,
file_path,
cid,
attributes={"key1": "val1", "key": val, "key2": "abc"},
shell=client_shell,
cluster=cluster,
)
for val in range(OBJECT_COUNT)
]
yield cid, objects_oids, file_path
# with reporter.step('Delete eACL public container'):
# delete_container(user_wallet, cid)

View file

@@ -0,0 +1,184 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.wellknown_acl import PRIVATE_ACL_F, PUBLIC_ACL_F, READONLY_ACL_F
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.storage.dataclasses.acl import EACLRole
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from pytest_tests.helpers.container_access import (
check_full_access_to_container,
check_no_access_to_container,
check_read_only_container,
)
from pytest_tests.testsuites.acl.conftest import Wallets
@pytest.mark.sanity
@pytest.mark.smoke
@pytest.mark.acl
@pytest.mark.acl_basic
class TestACLBasic(ClusterTestBase):
@pytest.fixture(scope="function")
def public_container(self, wallets: Wallets):
user_wallet = wallets.get_wallet()
with reporter.step("Create public container"):
cid_public = create_container(
user_wallet,
basic_acl=PUBLIC_ACL_F,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
yield cid_public
# with reporter.step('Delete public container'):
# delete_container(user_wallet, cid_public)
@pytest.fixture(scope="function")
def private_container(self, wallets: Wallets):
user_wallet = wallets.get_wallet()
with reporter.step("Create private container"):
cid_private = create_container(
user_wallet,
basic_acl=PRIVATE_ACL_F,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
yield cid_private
# with reporter.step('Delete private container'):
# delete_container(user_wallet, cid_private)
@pytest.fixture(scope="function")
def read_only_container(self, wallets: Wallets):
user_wallet = wallets.get_wallet()
with reporter.step("Create public readonly container"):
cid_read_only = create_container(
user_wallet,
basic_acl=READONLY_ACL_F,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
yield cid_read_only
# with reporter.step('Delete public readonly container'):
# delete_container(user_wallet, cid_read_only)
@allure.title("Operations with basic ACL on public container (obj_size={object_size})")
def test_basic_acl_public(self, wallets: Wallets, public_container: str, file_path: str):
"""
Test basic ACL set during public container creation.
"""
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(role=EACLRole.OTHERS)
cid = public_container
for wallet, desc in ((user_wallet, "owner"), (other_wallet, "other users")):
with reporter.step("Add test objects to container"):
# We create new objects for each wallet because check_full_access_to_container
# deletes the object
owner_object_oid = put_object_to_random_node(
user_wallet,
file_path,
cid,
shell=self.shell,
cluster=self.cluster,
attributes={"created": "owner"},
)
other_object_oid = put_object_to_random_node(
other_wallet,
file_path,
cid,
shell=self.shell,
cluster=self.cluster,
attributes={"created": "other"},
)
with reporter.step(f"Check {desc} has full access to public container"):
check_full_access_to_container(
wallet,
cid,
owner_object_oid,
file_path,
shell=self.shell,
cluster=self.cluster,
)
check_full_access_to_container(
wallet,
cid,
other_object_oid,
file_path,
shell=self.shell,
cluster=self.cluster,
)
@allure.title("Operations with basic ACL on PRIVATE container (obj_size={object_size})")
def test_basic_acl_private(self, wallets: Wallets, private_container: str, file_path: str):
"""
Test basic ACL set during private container creation.
"""
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(role=EACLRole.OTHERS)
cid = private_container
with reporter.step("Add test objects to container"):
owner_object_oid = put_object_to_random_node(
user_wallet, file_path, cid, shell=self.shell, cluster=self.cluster
)
with reporter.step("Check only owner has full access to private container"):
with reporter.step("Check no one except owner has access to operations with container"):
check_no_access_to_container(
other_wallet,
cid,
owner_object_oid,
file_path,
shell=self.shell,
cluster=self.cluster,
)
with reporter.step("Check owner has full access to private container"):
check_full_access_to_container(
user_wallet,
cid,
owner_object_oid,
file_path,
shell=self.shell,
cluster=self.cluster,
)
@allure.title("Operations with basic ACL on READONLY container (obj_size={object_size})")
def test_basic_acl_readonly(self, wallets: Wallets, client_shell: Shell, read_only_container: str, file_path: str):
"""
        Test basic ACL operations for read-only container.
"""
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(role=EACLRole.OTHERS)
cid = read_only_container
with reporter.step("Add test objects to container"):
object_oid = put_object_to_random_node(
user_wallet, file_path, cid, shell=client_shell, cluster=self.cluster
)
with reporter.step("Check other has read-only access to operations with container"):
check_read_only_container(
other_wallet,
cid,
object_oid,
file_path,
shell=client_shell,
cluster=self.cluster,
)
with reporter.step("Check owner has full access to public container"):
check_full_access_to_container(
user_wallet,
cid,
object_oid,
file_path,
shell=client_shell,
cluster=self.cluster,
)

View file

@@ -0,0 +1,205 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.acl import create_eacl, form_bearertoken_file, set_eacl, wait_for_cache_expired
from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from pytest_tests.helpers.container_access import (
check_custom_access_to_container,
check_full_access_to_container,
check_no_access_to_container,
)
from pytest_tests.testsuites.acl.conftest import Wallets
@pytest.mark.sanity
@pytest.mark.acl
@pytest.mark.acl_bearer
class TestACLBearer(ClusterTestBase):
@allure.title("Operations with BearerToken (role={role.value}, obj_size={object_size})")
@pytest.mark.parametrize("role", [EACLRole.USER, EACLRole.OTHERS])
def test_bearer_token_operations(
self,
wallets: Wallets,
eacl_container_with_objects: tuple[str, list[str], str],
role: EACLRole,
):
cid, objects_oids, file_path = eacl_container_with_objects
user_wallet = wallets.get_wallet()
deny_wallet = wallets.get_wallet(role)
endpoint = self.cluster.default_rpc_endpoint
with reporter.step(f"Check {role.value} has full access to container without bearer token"):
check_full_access_to_container(
deny_wallet,
cid,
objects_oids.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
)
with reporter.step(f"Set deny all operations for {role.value} via eACL"):
eacl = [EACLRule(access=EACLAccess.DENY, role=role, operation=op) for op in EACLOperation]
eacl_file = create_eacl(cid, eacl, shell=self.shell)
set_eacl(user_wallet, cid, eacl_file, shell=self.shell, endpoint=endpoint)
wait_for_cache_expired()
with reporter.step(f"Create bearer token for {role.value} with all operations allowed"):
bearer = form_bearertoken_file(
user_wallet,
cid,
[EACLRule(operation=op, access=EACLAccess.ALLOW, role=role) for op in EACLOperation],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
with reporter.step(f"Check {role.value} without token has no access to all operations with container"):
check_no_access_to_container(
deny_wallet,
cid,
objects_oids.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
)
with reporter.step(f"Check {role.value} with token has access to all operations with container"):
check_full_access_to_container(
deny_wallet,
cid,
objects_oids.pop(),
file_path,
bearer=bearer,
shell=self.shell,
cluster=self.cluster,
)
with reporter.step(f"Set allow all operations for {role.value} via eACL"):
eacl = [EACLRule(access=EACLAccess.ALLOW, role=role, operation=op) for op in EACLOperation]
eacl_file = create_eacl(cid, eacl, shell=self.shell)
set_eacl(user_wallet, cid, eacl_file, shell=self.shell, endpoint=endpoint)
wait_for_cache_expired()
with reporter.step(f"Check {role.value} without token has access to all operations with container"):
check_full_access_to_container(
deny_wallet,
cid,
objects_oids.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
)
@allure.title("BearerToken for compound operations (obj_size={object_size})")
def test_bearer_token_compound_operations(self, wallets: Wallets, eacl_container_with_objects):
endpoint = self.cluster.default_rpc_endpoint
cid, objects_oids, file_path = eacl_container_with_objects
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(role=EACLRole.OTHERS)
# Operations that we will deny for each role via eACL
deny_map = {
EACLRole.USER: [EACLOperation.DELETE],
EACLRole.OTHERS: [
EACLOperation.SEARCH,
EACLOperation.GET_RANGE_HASH,
EACLOperation.GET_RANGE,
],
}
# Operations that we will allow for each role with bearer token
bearer_map = {
EACLRole.USER: [
EACLOperation.DELETE,
EACLOperation.PUT,
EACLOperation.GET_RANGE,
],
EACLRole.OTHERS: [
EACLOperation.GET,
EACLOperation.GET_RANGE,
EACLOperation.GET_RANGE_HASH,
],
}
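        # Operations that remain denied even with the bearer token: denied via eACL and not re-allowed by the token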
deny_map_with_bearer = {
EACLRole.USER: [op for op in deny_map[EACLRole.USER] if op not in bearer_map[EACLRole.USER]],
EACLRole.OTHERS: [op for op in deny_map[EACLRole.OTHERS] if op not in bearer_map[EACLRole.OTHERS]],
}
eacl_deny = []
for role, operations in deny_map.items():
eacl_deny += [EACLRule(access=EACLAccess.DENY, role=role, operation=op) for op in operations]
set_eacl(
user_wallet,
cid,
eacl_table_path=create_eacl(cid, eacl_deny, shell=self.shell),
shell=self.shell,
endpoint=endpoint,
)
wait_for_cache_expired()
with reporter.step("Check rule consistency without bearer"):
check_custom_access_to_container(
user_wallet,
cid,
objects_oids.pop(),
file_path,
deny_operations=deny_map[EACLRole.USER],
shell=self.shell,
cluster=self.cluster,
)
check_custom_access_to_container(
other_wallet,
cid,
objects_oids.pop(),
file_path,
deny_operations=deny_map[EACLRole.OTHERS],
shell=self.shell,
cluster=self.cluster,
)
with reporter.step("Check rule consistency using bearer token"):
bearer_user = form_bearertoken_file(
user_wallet,
cid,
[
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.USER)
for op in bearer_map[EACLRole.USER]
],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
bearer_other = form_bearertoken_file(
user_wallet,
cid,
[
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS)
for op in bearer_map[EACLRole.OTHERS]
],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
check_custom_access_to_container(
user_wallet,
cid,
objects_oids.pop(),
file_path,
deny_operations=deny_map_with_bearer[EACLRole.USER],
bearer=bearer_user,
shell=self.shell,
cluster=self.cluster,
)
check_custom_access_to_container(
other_wallet,
cid,
objects_oids.pop(),
file_path,
deny_operations=deny_map_with_bearer[EACLRole.OTHERS],
bearer=bearer_other,
shell=self.shell,
cluster=self.cluster,
)

View file

@@ -0,0 +1,612 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.acl import create_eacl, set_eacl, wait_for_cache_expired
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.steps.node_management import drop_object
from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.failover_utils import wait_object_replication
from pytest_tests.helpers.container_access import check_full_access_to_container, check_no_access_to_container
from pytest_tests.helpers.object_access import (
can_delete_object,
can_get_head_object,
can_get_object,
can_get_range_hash_of_object,
can_get_range_of_object,
can_put_object,
can_search_object,
)
from pytest_tests.testsuites.acl.conftest import Wallets
@pytest.mark.acl
@pytest.mark.acl_extended
class TestEACLContainer(ClusterTestBase):
@pytest.fixture(scope="function")
def eacl_full_placement_container_with_object(self, wallets: Wallets, file_path: str) -> tuple[str, str, str]:
user_wallet = wallets.get_wallet()
storage_nodes = self.cluster.storage_nodes
node_count = len(storage_nodes)
with reporter.step("Create eACL public container with full placement rule"):
full_placement_rule = f"REP {node_count} IN X CBF 1 SELECT {node_count} FROM * AS X"
cid = create_container(
wallet=user_wallet,
rule=full_placement_rule,
basic_acl=PUBLIC_ACL,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
with reporter.step("Add test object to container"):
oid = put_object_to_random_node(user_wallet, file_path, cid, shell=self.shell, cluster=self.cluster)
wait_object_replication(
cid,
oid,
node_count,
shell=self.shell,
nodes=storage_nodes,
)
yield cid, oid, file_path
@pytest.mark.sanity
@allure.title("Deny operations (role={deny_role.value}, obj_size={object_size})")
@pytest.mark.parametrize("deny_role", [EACLRole.USER, EACLRole.OTHERS])
def test_extended_acl_deny_all_operations(
self,
wallets: Wallets,
eacl_container_with_objects: tuple[str, list[str], str],
deny_role: EACLRole,
):
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(EACLRole.OTHERS)
deny_role_wallet = other_wallet if deny_role == EACLRole.OTHERS else user_wallet
not_deny_role_wallet = user_wallet if deny_role == EACLRole.OTHERS else other_wallet
deny_role_str = "all others" if deny_role == EACLRole.OTHERS else "user"
not_deny_role_str = "user" if deny_role == EACLRole.OTHERS else "all others"
cid, object_oids, file_path = eacl_container_with_objects
with reporter.step(f"Deny all operations for {deny_role_str} via eACL"):
eacl_deny = [EACLRule(access=EACLAccess.DENY, role=deny_role, operation=op) for op in EACLOperation]
set_eacl(
user_wallet,
cid,
create_eacl(cid, eacl_deny, shell=self.shell),
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
wait_for_cache_expired()
with reporter.step(f"Check only {not_deny_role_str} has full access to container"):
with reporter.step(f"Check {deny_role_str} has not access to any operations with container"):
check_no_access_to_container(
deny_role_wallet,
cid,
object_oids[0],
file_path,
shell=self.shell,
cluster=self.cluster,
)
with reporter.step(f"Check {not_deny_role_str} has full access to eACL public container"):
check_full_access_to_container(
not_deny_role_wallet,
cid,
object_oids.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
)
with reporter.step(f"Allow all operations for {deny_role_str} via eACL"):
eacl_deny = [EACLRule(access=EACLAccess.ALLOW, role=deny_role, operation=op) for op in EACLOperation]
set_eacl(
user_wallet,
cid,
create_eacl(cid, eacl_deny, shell=self.shell),
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
wait_for_cache_expired()
with reporter.step("Check all have full access to eACL public container"):
check_full_access_to_container(
user_wallet,
cid,
object_oids.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
)
check_full_access_to_container(
other_wallet,
cid,
object_oids.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
)
@allure.title("Operations for only one other pubkey (obj_size={object_size})")
def test_extended_acl_deny_all_operations_exclude_pubkey(
self, wallets: Wallets, eacl_container_with_objects: tuple[str, list[str], str]
):
user_wallet = wallets.get_wallet()
other_wallet, other_wallet_allow = wallets.get_wallets_list(EACLRole.OTHERS)[0:2]
cid, object_oids, file_path = eacl_container_with_objects
with reporter.step("Deny all operations for others except single wallet via eACL"):
eacl = [
EACLRule(
access=EACLAccess.ALLOW,
role=other_wallet_allow,
operation=op,
)
for op in EACLOperation
]
eacl += [EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=op) for op in EACLOperation]
set_eacl(
user_wallet,
cid,
create_eacl(cid, eacl, shell=self.shell),
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
wait_for_cache_expired()
with reporter.step("Check only owner and allowed other have full access to public container"):
with reporter.step("Check other has not access to operations with container"):
check_no_access_to_container(
other_wallet,
cid,
object_oids[0],
file_path,
shell=self.shell,
cluster=self.cluster,
)
with reporter.step("Check owner has full access to public container"):
check_full_access_to_container(
user_wallet,
cid,
object_oids.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
)
with reporter.step("Check allowed other has full access to public container"):
check_full_access_to_container(
other_wallet_allow,
cid,
object_oids.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
)
@allure.title("Replication with eACL deny rules (obj_size={object_size})")
def test_extended_acl_deny_replication(
self,
wallets: Wallets,
eacl_full_placement_container_with_object: tuple[str, list[str], str],
):
user_wallet = wallets.get_wallet()
cid, oid, file_path = eacl_full_placement_container_with_object
storage_nodes = self.cluster.storage_nodes
storage_node = self.cluster.storage_nodes[0]
with reporter.step("Deny all operations for user via eACL"):
eacl_deny = [EACLRule(access=EACLAccess.DENY, role=EACLRole.USER, operation=op) for op in EACLOperation]
eacl_deny += [EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=op) for op in EACLOperation]
set_eacl(
user_wallet,
cid,
create_eacl(cid, eacl_deny, shell=self.shell),
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
wait_for_cache_expired()
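        # Replication is a system operation and is expected to proceed even though all user operations are denied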
with reporter.step("Drop object to check replication"):
drop_object(storage_node, cid=cid, oid=oid)
storage_wallet_path = storage_node.get_wallet_path()
with reporter.step("Wait for dropped object replicated"):
wait_object_replication(
cid,
oid,
len(storage_nodes),
self.shell,
storage_nodes,
)
@allure.title("Operations with extended ACL for SYSTEM (obj_size={object_size})")
def test_extended_actions_system(self, wallets: Wallets, eacl_container_with_objects: tuple[str, list[str], str]):
user_wallet = wallets.get_wallet()
ir_wallet, storage_wallet = wallets.get_wallets_list(role=EACLRole.SYSTEM)[:2]
cid, object_oids, file_path = eacl_container_with_objects
endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Check IR and STORAGE rules compliance"):
assert not can_put_object(
ir_wallet,
cid,
file_path,
shell=self.shell,
cluster=self.cluster,
)
assert can_put_object(
storage_wallet,
cid,
file_path,
shell=self.shell,
cluster=self.cluster,
)
assert can_get_object(
ir_wallet,
cid,
object_oids[0],
file_path,
shell=self.shell,
cluster=self.cluster,
)
assert can_get_object(
storage_wallet,
cid,
object_oids[0],
file_path,
shell=self.shell,
cluster=self.cluster,
)
assert can_get_head_object(
ir_wallet,
cid,
object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
assert can_get_head_object(
storage_wallet,
cid,
object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
assert can_search_object(
ir_wallet,
cid,
shell=self.shell,
endpoint=endpoint,
oid=object_oids[0],
)
assert can_search_object(
storage_wallet,
cid,
shell=self.shell,
endpoint=endpoint,
oid=object_oids[0],
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=ir_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=storage_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
assert can_get_range_hash_of_object(
wallet=ir_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
assert can_get_range_hash_of_object(
wallet=storage_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=ir_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=storage_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
with reporter.step("Deny all operations for SYSTEM via eACL"):
set_eacl(
user_wallet,
cid,
create_eacl(
cid=cid,
rules_list=[
EACLRule(access=EACLAccess.DENY, role=EACLRole.SYSTEM, operation=op) for op in EACLOperation
],
shell=self.shell,
),
shell=self.shell,
endpoint=endpoint,
)
wait_for_cache_expired()
with reporter.step("Check IR and STORAGE rules compliance with deny eACL"):
assert not can_put_object(
wallet=ir_wallet,
cid=cid,
file_name=file_path,
shell=self.shell,
cluster=self.cluster,
)
assert not can_put_object(
wallet=storage_wallet,
cid=cid,
file_name=file_path,
shell=self.shell,
cluster=self.cluster,
)
with pytest.raises(AssertionError):
assert can_get_object(
wallet=ir_wallet,
cid=cid,
oid=object_oids[0],
file_name=file_path,
shell=self.shell,
cluster=self.cluster,
)
with pytest.raises(AssertionError):
assert can_get_object(
wallet=storage_wallet,
cid=cid,
oid=object_oids[0],
file_name=file_path,
shell=self.shell,
cluster=self.cluster,
)
with pytest.raises(AssertionError):
assert can_get_head_object(
wallet=ir_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
with pytest.raises(AssertionError):
assert can_get_head_object(
wallet=storage_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
with pytest.raises(AssertionError):
assert can_search_object(
wallet=ir_wallet,
cid=cid,
shell=self.shell,
endpoint=endpoint,
oid=object_oids[0],
)
with pytest.raises(AssertionError):
assert can_search_object(
wallet=storage_wallet,
cid=cid,
shell=self.shell,
endpoint=endpoint,
oid=object_oids[0],
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=ir_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=storage_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
with pytest.raises(AssertionError):
assert can_get_range_hash_of_object(
wallet=ir_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
with pytest.raises(AssertionError):
assert can_get_range_hash_of_object(
wallet=storage_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=ir_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=storage_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
with reporter.step("Allow all operations for SYSTEM via eACL"):
set_eacl(
user_wallet,
cid,
create_eacl(
cid=cid,
rules_list=[
EACLRule(access=EACLAccess.ALLOW, role=EACLRole.SYSTEM, operation=op) for op in EACLOperation
],
shell=self.shell,
),
shell=self.shell,
endpoint=endpoint,
)
wait_for_cache_expired()
with reporter.step("Check IR and STORAGE rules compliance with allow eACL"):
assert not can_put_object(
wallet=ir_wallet,
cid=cid,
file_name=file_path,
shell=self.shell,
cluster=self.cluster,
)
assert can_put_object(
wallet=storage_wallet,
cid=cid,
file_name=file_path,
shell=self.shell,
cluster=self.cluster,
)
assert can_get_object(
wallet=ir_wallet,
cid=cid,
oid=object_oids[0],
file_name=file_path,
shell=self.shell,
cluster=self.cluster,
)
assert can_get_object(
wallet=storage_wallet,
cid=cid,
oid=object_oids[0],
file_name=file_path,
shell=self.shell,
cluster=self.cluster,
)
assert can_get_head_object(
wallet=ir_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
assert can_get_head_object(
wallet=storage_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
assert can_search_object(
wallet=ir_wallet,
cid=cid,
shell=self.shell,
oid=object_oids[0],
endpoint=endpoint,
)
assert can_search_object(
wallet=storage_wallet,
cid=cid,
shell=self.shell,
oid=object_oids[0],
endpoint=endpoint,
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=ir_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=storage_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
assert can_get_range_hash_of_object(
wallet=ir_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
assert can_get_range_hash_of_object(
wallet=storage_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=ir_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=storage_wallet,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
)

View file

@@ -0,0 +1,587 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.acl import create_eacl, form_bearertoken_file, set_eacl, wait_for_cache_expired
from frostfs_testlib.steps.cli.container import create_container, delete_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.storage.dataclasses.acl import (
EACLAccess,
EACLFilter,
EACLFilters,
EACLHeaderType,
EACLMatchType,
EACLOperation,
EACLRole,
EACLRule,
)
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from pytest_tests.helpers.container_access import check_full_access_to_container, check_no_access_to_container
from pytest_tests.helpers.object_access import can_get_head_object, can_get_object, can_put_object
from pytest_tests.testsuites.acl.conftest import Wallets
@pytest.mark.acl
@pytest.mark.acl_filters
class TestEACLFilters(ClusterTestBase):
# SPEC: https://github.com/nspcc-dev/neofs-spec/blob/master/01-arch/07-acl.md
ATTRIBUTE = {"check_key": "check_value"}
OTHER_ATTRIBUTE = {"check_key": "other_value"}
SET_HEADERS = {
"key_one": "check_value",
"x_key": "xvalue",
"check_key": "check_value",
}
OTHER_HEADERS = {
"key_one": "check_value",
"x_key": "other_value",
"check_key": "other_value",
}
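    # REQUEST filters match X-headers sent with a request; OBJECT filters match attributes stored on the object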
REQ_EQUAL_FILTER = EACLFilter(key="check_key", value="check_value", header_type=EACLHeaderType.REQUEST)
NOT_REQ_EQUAL_FILTER = EACLFilter(
key="check_key",
value="other_value",
match_type=EACLMatchType.STRING_NOT_EQUAL,
header_type=EACLHeaderType.REQUEST,
)
OBJ_EQUAL_FILTER = EACLFilter(key="check_key", value="check_value", header_type=EACLHeaderType.OBJECT)
NOT_OBJ_EQUAL_FILTER = EACLFilter(
key="check_key",
value="other_value",
match_type=EACLMatchType.STRING_NOT_EQUAL,
header_type=EACLHeaderType.OBJECT,
)
OBJECT_COUNT = 5
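    # Only these operations are exercised against object-attribute filters in this suite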
OBJECT_ATTRIBUTES_FILTER_SUPPORTED_OPERATIONS = [
EACLOperation.GET,
EACLOperation.HEAD,
EACLOperation.PUT,
]
@pytest.fixture(scope="function")
def eacl_container_with_objects(self, wallets: Wallets, file_path: str):
user_wallet = wallets.get_wallet()
with reporter.step("Create eACL public container"):
cid = create_container(
user_wallet,
basic_acl=PUBLIC_ACL,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
with reporter.step("Add test objects to container"):
objects_with_header = [
put_object_to_random_node(
user_wallet,
file_path,
cid,
shell=self.shell,
cluster=self.cluster,
attributes={**self.SET_HEADERS, "key": val},
)
for val in range(self.OBJECT_COUNT)
]
objects_with_other_header = [
put_object_to_random_node(
user_wallet,
file_path,
cid,
shell=self.shell,
cluster=self.cluster,
attributes={**self.OTHER_HEADERS, "key": val},
)
for val in range(self.OBJECT_COUNT)
]
objects_without_header = [
put_object_to_random_node(
user_wallet,
file_path,
cid,
shell=self.shell,
cluster=self.cluster,
)
for _ in range(self.OBJECT_COUNT)
]
yield cid, objects_with_header, objects_with_other_header, objects_without_header, file_path
with reporter.step("Delete eACL public container"):
delete_container(
user_wallet,
cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
@pytest.mark.sanity
@allure.title("Operations with request filter (match_type={match_type}, obj_size={object_size})")
@pytest.mark.parametrize("match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL])
def test_extended_acl_filters_request(
self,
wallets: Wallets,
eacl_container_with_objects: tuple[str, list[str], str],
match_type: EACLMatchType,
):
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(EACLRole.OTHERS)
(
cid,
objects_with_header,
objects_with_other_header,
objects_without_header,
file_path,
) = eacl_container_with_objects
with reporter.step("Deny all operations for other with eACL request filter"):
equal_filter = EACLFilter(**self.REQ_EQUAL_FILTER.__dict__)
equal_filter.match_type = match_type
eacl_deny = [
EACLRule(
access=EACLAccess.DENY,
role=EACLRole.OTHERS,
filters=EACLFilters([equal_filter]),
operation=op,
)
for op in EACLOperation
]
set_eacl(
user_wallet,
cid,
create_eacl(cid, eacl_deny, shell=self.shell),
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
wait_for_cache_expired()
# Filter denies requests where "check_key {match_type} ATTRIBUTE", so when match_type
# is STRING_EQUAL, then requests with "check_key=OTHER_ATTRIBUTE" will be allowed while
# requests with "check_key=ATTRIBUTE" will be denied, and vice versa
allow_headers = self.OTHER_ATTRIBUTE if match_type == EACLMatchType.STRING_EQUAL else self.ATTRIBUTE
deny_headers = self.ATTRIBUTE if match_type == EACLMatchType.STRING_EQUAL else self.OTHER_ATTRIBUTE
        # We test on 3 groups of objects with various headers,
        # but the eACL rule should ignore object headers and
        # work only based on request headers
for oid in (
objects_with_header,
objects_with_other_header,
objects_without_header,
):
with reporter.step("Check other has full access when sending request without headers"):
check_full_access_to_container(
other_wallet,
cid,
oid.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
)
with reporter.step("Check other has full access when sending request with allowed headers"):
check_full_access_to_container(
other_wallet,
cid,
oid.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
xhdr=allow_headers,
)
with reporter.step("Check other has no access when sending request with denied headers"):
check_no_access_to_container(
other_wallet,
cid,
oid.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
xhdr=deny_headers,
)
with reporter.step(
"Check other has full access when sending request " "with denied headers and using bearer token"
):
bearer_other = form_bearertoken_file(
user_wallet,
cid,
[EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS) for op in EACLOperation],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
check_full_access_to_container(
other_wallet,
cid,
oid.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
xhdr=deny_headers,
bearer=bearer_other,
)
@allure.title("Operations with deny user headers filter (match_type={match_type}, obj_size={object_size})")
@pytest.mark.parametrize("match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL])
def test_extended_acl_deny_filters_object(
self,
wallets: Wallets,
eacl_container_with_objects: tuple[str, list[str], str],
match_type: EACLMatchType,
):
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(EACLRole.OTHERS)
(
cid,
objects_with_header,
objects_with_other_header,
objs_without_header,
file_path,
) = eacl_container_with_objects
with reporter.step("Deny all operations for other with object filter"):
equal_filter = EACLFilter(**self.OBJ_EQUAL_FILTER.__dict__)
equal_filter.match_type = match_type
eacl_deny = [
EACLRule(
access=EACLAccess.DENY,
role=EACLRole.OTHERS,
filters=EACLFilters([equal_filter]),
operation=op,
)
for op in self.OBJECT_ATTRIBUTES_FILTER_SUPPORTED_OPERATIONS
]
set_eacl(
user_wallet,
cid,
create_eacl(cid, eacl_deny, shell=self.shell),
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
wait_for_cache_expired()
allow_objects = objects_with_other_header if match_type == EACLMatchType.STRING_EQUAL else objects_with_header
deny_objects = objects_with_header if match_type == EACLMatchType.STRING_EQUAL else objects_with_other_header
        # We will attempt requests with various headers,
        # but the eACL rule should ignore request headers and validate
        # only object headers
for xhdr in (self.ATTRIBUTE, self.OTHER_ATTRIBUTE, None):
with reporter.step("Check other have full access to objects without attributes"):
check_full_access_to_container(
other_wallet,
cid,
objs_without_header.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
xhdr=xhdr,
)
with reporter.step("Check other have full access to objects without deny attribute"):
check_full_access_to_container(
other_wallet,
cid,
allow_objects.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
xhdr=xhdr,
)
with reporter.step("Check other have no access to objects with deny attribute"):
with pytest.raises(AssertionError):
assert can_get_head_object(
other_wallet,
cid,
deny_objects[0],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
xhdr=xhdr,
)
with pytest.raises(AssertionError):
assert can_get_object(
other_wallet,
cid,
deny_objects[0],
file_path,
shell=self.shell,
cluster=self.cluster,
xhdr=xhdr,
)
with reporter.step("Check other have access to objects with deny attribute and using bearer token"):
bearer_other = form_bearertoken_file(
user_wallet,
cid,
[
EACLRule(
operation=op,
access=EACLAccess.ALLOW,
role=EACLRole.OTHERS,
)
for op in EACLOperation
],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
check_full_access_to_container(
other_wallet,
cid,
deny_objects.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
xhdr=xhdr,
bearer=bearer_other,
)
allow_attribute = self.OTHER_ATTRIBUTE if match_type == EACLMatchType.STRING_EQUAL else self.ATTRIBUTE
with reporter.step("Check other can PUT objects without denied attribute"):
assert can_put_object(
other_wallet,
cid,
file_path,
shell=self.shell,
cluster=self.cluster,
attributes=allow_attribute,
)
assert can_put_object(other_wallet, cid, file_path, shell=self.shell, cluster=self.cluster)
deny_attribute = self.ATTRIBUTE if match_type == EACLMatchType.STRING_EQUAL else self.OTHER_ATTRIBUTE
with reporter.step("Check other can not PUT objects with denied attribute"):
with pytest.raises(AssertionError):
assert can_put_object(
other_wallet,
cid,
file_path,
shell=self.shell,
cluster=self.cluster,
attributes=deny_attribute,
)
with reporter.step("Check other can PUT objects with denied attribute and using bearer token"):
bearer_other_for_put = form_bearertoken_file(
user_wallet,
cid,
[
EACLRule(
operation=EACLOperation.PUT,
access=EACLAccess.ALLOW,
role=EACLRole.OTHERS,
)
],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
assert can_put_object(
other_wallet,
cid,
file_path,
shell=self.shell,
cluster=self.cluster,
attributes=deny_attribute,
bearer=bearer_other_for_put,
)
@allure.title("Operations with allow eACL user headers filters (match_type={match_type}, obj_size={object_size})")
@pytest.mark.parametrize("match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL])
def test_extended_acl_allow_filters_object(
self,
wallets: Wallets,
eacl_container_with_objects: tuple[str, list[str], str],
match_type: EACLMatchType,
):
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(EACLRole.OTHERS)
(
cid,
objects_with_header,
objects_with_other_header,
objects_without_header,
file_path,
) = eacl_container_with_objects
with reporter.step("Deny all operations for others except few operations allowed by object filter"):
equal_filter = EACLFilter(**self.OBJ_EQUAL_FILTER.__dict__)
equal_filter.match_type = match_type
eacl = [
EACLRule(
access=EACLAccess.ALLOW,
role=EACLRole.OTHERS,
filters=EACLFilters([equal_filter]),
operation=op,
)
for op in self.OBJECT_ATTRIBUTES_FILTER_SUPPORTED_OPERATIONS
] + [
EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=op)
for op in self.OBJECT_ATTRIBUTES_FILTER_SUPPORTED_OPERATIONS
]
set_eacl(
user_wallet,
cid,
create_eacl(cid, eacl, shell=self.shell),
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
wait_for_cache_expired()
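        # STRING_EQUAL allows objects whose attributes match the filter; STRING_NOT_EQUAL inverts the selection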
if match_type == EACLMatchType.STRING_EQUAL:
allow_objects = objects_with_header
deny_objects = objects_with_other_header
allow_attribute = self.ATTRIBUTE
deny_attribute = self.OTHER_ATTRIBUTE
else:
allow_objects = objects_with_other_header
deny_objects = objects_with_header
allow_attribute = self.OTHER_ATTRIBUTE
deny_attribute = self.ATTRIBUTE
with reporter.step("Check other cannot get and put objects without attributes"):
oid = objects_without_header.pop()
with pytest.raises(AssertionError):
assert can_get_head_object(
other_wallet,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
with pytest.raises(AssertionError):
assert can_get_object(
other_wallet,
cid,
oid,
file_path,
shell=self.shell,
cluster=self.cluster,
)
with pytest.raises(AssertionError):
assert can_put_object(other_wallet, cid, file_path, shell=self.shell, cluster=self.cluster)
with reporter.step("Check other can get and put objects without attributes and using bearer token"):
bearer_other = form_bearertoken_file(
user_wallet,
cid,
[
EACLRule(
operation=op,
access=EACLAccess.ALLOW,
role=EACLRole.OTHERS,
)
for op in EACLOperation
],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
assert can_get_head_object(
other_wallet,
cid,
objects_without_header[0],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
bearer=bearer_other,
)
assert can_get_object(
other_wallet,
cid,
objects_without_header[0],
file_path,
shell=self.shell,
cluster=self.cluster,
bearer=bearer_other,
)
assert can_put_object(
other_wallet,
cid,
file_path,
shell=self.shell,
cluster=self.cluster,
bearer=bearer_other,
)
with reporter.step("Check other can get objects with attributes matching the filter"):
oid = allow_objects.pop()
assert can_get_head_object(
other_wallet,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
assert can_get_object(
other_wallet,
cid,
oid,
file_path,
shell=self.shell,
cluster=self.cluster,
)
assert can_put_object(
other_wallet,
cid,
file_path,
shell=self.shell,
cluster=self.cluster,
attributes=allow_attribute,
)
with reporter.step("Check other cannot get objects without attributes matching the filter"):
with pytest.raises(AssertionError):
assert can_get_head_object(
other_wallet,
cid,
deny_objects[0],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
with pytest.raises(AssertionError):
assert can_get_object(
other_wallet,
cid,
deny_objects[0],
file_path,
shell=self.shell,
cluster=self.cluster,
)
with pytest.raises(AssertionError):
assert can_put_object(
other_wallet,
cid,
file_path,
attributes=deny_attribute,
shell=self.shell,
cluster=self.cluster,
)
with reporter.step(
"Check other can get objects without attributes matching the filter " "and using bearer token"
):
oid = deny_objects.pop()
assert can_get_head_object(
other_wallet,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
bearer=bearer_other,
)
assert can_get_object(
other_wallet,
cid,
oid,
file_path,
shell=self.shell,
cluster=self.cluster,
bearer=bearer_other,
)
assert can_put_object(
other_wallet,
cid,
file_path,
shell=self.shell,
cluster=self.cluster,
attributes=deny_attribute,
bearer=bearer_other,
)

File diff suppressed because it is too large

View file

@ -3,45 +3,46 @@ import os
import random import random
import shutil import shutil
from datetime import datetime, timedelta, timezone from datetime import datetime, timedelta, timezone
from importlib.metadata import entry_points
from typing import Optional from typing import Optional
import allure import allure
import pytest import pytest
import yaml
from dateutil import parser from dateutil import parser
from frostfs_testlib import plugins, reporter from frostfs_testlib import plugins, reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.credentials.interfaces import CredentialsProvider, User from frostfs_testlib.credentials.interfaces import CredentialsProvider, User
from frostfs_testlib.healthcheck.interfaces import Healthcheck from frostfs_testlib.healthcheck.interfaces import Healthcheck
from frostfs_testlib.hosting import Hosting from frostfs_testlib.hosting import Hosting
from frostfs_testlib.resources import optionals from frostfs_testlib.reporter import AllureHandler, StepsLogger
from frostfs_testlib.resources.common import COMPLEX_OBJECT_CHUNKS_COUNT, COMPLEX_OBJECT_TAIL_SIZE, SIMPLE_OBJECT_SIZE from frostfs_testlib.resources.common import (
ASSETS_DIR,
COMPLEX_OBJECT_CHUNKS_COUNT,
COMPLEX_OBJECT_TAIL_SIZE,
SIMPLE_OBJECT_SIZE,
)
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.shell import LocalShell, Shell from frostfs_testlib.shell import LocalShell, Shell
from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE, FROSTFS_CLI_EXEC from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE
from frostfs_testlib.steps.cli.object import get_netmap_netinfo from frostfs_testlib.steps.cli.object import get_netmap_netinfo
from frostfs_testlib.steps.s3 import s3_helper from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage import get_service_registry
from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.storage.grpc_operations.client_wrappers import CliClientWrapper
from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.parallel import parallel from frostfs_testlib.testing.parallel import parallel
from frostfs_testlib.testing.test_control import run_optionally, wait_for_success from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import env_utils, string_utils, version_utils from frostfs_testlib.utils import env_utils, version_utils
from frostfs_testlib.utils.file_utils import TestFile, generate_file
from ..resources.common import TEST_CYCLES_COUNT from pytest_tests.resources.common import HOSTING_CONFIG_FILE, TEST_CYCLES_COUNT
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
SERVICE_ACTIVE_TIME = 20 SERVICE_ACTIVE_TIME = 20
WALLTETS_IN_POOL = 2
# Add logs check test even if it's not fit to mark selectors # Add logs check test even if it's not fit to mark selectors
def pytest_configure(config: pytest.Config): def pytest_configure(config: pytest.Config):
@ -53,20 +54,16 @@ def pytest_configure(config: pytest.Config):
number_key = pytest.StashKey[str]() number_key = pytest.StashKey[str]()
start_time = pytest.StashKey[int]() start_time = pytest.StashKey[int]()
test_outcome = pytest.StashKey[str]() test_outcome = pytest.StashKey[str]()
# pytest hook. Do not rename # pytest hook. Do not rename
def pytest_collection_modifyitems(items: list[pytest.Item]): def pytest_collection_modifyitems(items: list[pytest.Item]):
# Change order of tests based on @pytest.mark.order(<int>) marker # Make network tests last based on @pytest.mark.node_mgmt and logs_test to be latest
def order(item: pytest.Item) -> int: def priority(item: pytest.Item) -> int:
order_marker = item.get_closest_marker("order") is_node_mgmt_test = 1 if item.get_closest_marker("node_mgmt") else 0
if order_marker and (len(order_marker.args) != 1 or not isinstance(order_marker.args[0], int)): is_logs_check_test = 100 if item.get_closest_marker("logs_after_session") else 0
raise RuntimeError("Incorrect usage of pytest.mark.order") is_system_time_test = 10 if item.get_closest_marker("time") else 0
return is_node_mgmt_test + is_logs_check_test + is_system_time_test
order_value = order_marker.args[0] if order_marker else 0 items.sort(key=lambda item: priority(item))
return order_value
items.sort(key=lambda item: order(item))
# pytest hook. Do not rename # pytest hook. Do not rename
@ -112,7 +109,27 @@ def pytest_generate_tests(metafunc: pytest.Metafunc):
return return
metafunc.fixturenames.append("cycle") metafunc.fixturenames.append("cycle")
metafunc.parametrize("cycle", range(1, TEST_CYCLES_COUNT + 1), ids=[f"cycle {cycle}" for cycle in range(1, TEST_CYCLES_COUNT + 1)]) metafunc.parametrize(
"cycle",
range(1, TEST_CYCLES_COUNT + 1),
ids=[f"cycle {cycle}" for cycle in range(1, TEST_CYCLES_COUNT + 1)],
)
@pytest.fixture(scope="session")
def configure_testlib():
reporter.get_reporter().register_handler(AllureHandler())
reporter.get_reporter().register_handler(StepsLogger())
logging.getLogger("paramiko").setLevel(logging.INFO)
# Register Services for cluster
registry = get_service_registry()
services = entry_points(group="frostfs.testlib.services")
for svc in services:
registry.register_service(svc.name, svc.load())
yield
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
@ -120,6 +137,17 @@ def client_shell(configure_testlib) -> Shell:
yield LocalShell() yield LocalShell()
@pytest.fixture(scope="session")
def hosting(configure_testlib) -> Hosting:
with open(HOSTING_CONFIG_FILE, "r") as file:
hosting_config = yaml.full_load(file)
hosting_instance = Hosting()
hosting_instance.configure(hosting_config)
yield hosting_instance
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def require_multiple_hosts(hosting: Hosting): def require_multiple_hosts(hosting: Hosting):
"""Designates tests that require environment with multiple hosts. """Designates tests that require environment with multiple hosts.
@ -148,7 +176,11 @@ def require_multiple_interfaces(cluster: Cluster):
def max_object_size(cluster: Cluster, client_shell: Shell) -> int: def max_object_size(cluster: Cluster, client_shell: Shell) -> int:
storage_node = cluster.storage_nodes[0] storage_node = cluster.storage_nodes[0]
wallet = WalletInfo.from_node(storage_node) wallet = WalletInfo.from_node(storage_node)
net_info = get_netmap_netinfo(wallet=wallet, endpoint=storage_node.get_rpc_endpoint(), shell=client_shell) net_info = get_netmap_netinfo(
wallet=wallet,
endpoint=storage_node.get_rpc_endpoint(),
shell=client_shell,
)
yield net_info["maximum_object_size"] yield net_info["maximum_object_size"]
@ -158,11 +190,6 @@ def simple_object_size(max_object_size: int) -> ObjectSize:
return ObjectSize("simple", size) return ObjectSize("simple", size)
@pytest.fixture()
def file_path(object_size: ObjectSize) -> TestFile:
return generate_file(object_size.value)
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def complex_object_size(max_object_size: int) -> ObjectSize: def complex_object_size(max_object_size: int) -> ObjectSize:
size = max_object_size * int(COMPLEX_OBJECT_CHUNKS_COUNT) + int(COMPLEX_OBJECT_TAIL_SIZE) size = max_object_size * int(COMPLEX_OBJECT_CHUNKS_COUNT) + int(COMPLEX_OBJECT_TAIL_SIZE)
@@ -172,9 +199,12 @@ def complex_object_size(max_object_size: int) -> ObjectSize:
 # By default we want all tests to be executed with both object sizes
 # This can be overriden in choosen tests if needed
 @pytest.fixture(
-    scope="session", params=[pytest.param("simple", marks=pytest.mark.simple), pytest.param("complex", marks=pytest.mark.complex)]
+    scope="session",
+    params=[pytest.param("simple", marks=pytest.mark.simple), pytest.param("complex", marks=pytest.mark.complex)],
 )
-def object_size(simple_object_size: ObjectSize, complex_object_size: ObjectSize, request: pytest.FixtureRequest) -> ObjectSize:
+def object_size(
+    simple_object_size: ObjectSize, complex_object_size: ObjectSize, request: pytest.FixtureRequest
+) -> ObjectSize:
     if request.param == "simple":
         return simple_object_size
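For reference, this is how the parametrized fixture fans out: any test that requests `object_size` runs once per param, and the attached marks allow filtering with `-m simple` or `-m complex`. A runnable sketch with the sizes reduced to plain strings (marker registration in pytest.ini omitted):

```python
import pytest


@pytest.fixture(
    scope="session",
    params=[pytest.param("simple", marks=pytest.mark.simple), pytest.param("complex", marks=pytest.mark.complex)],
)
def object_size(request: pytest.FixtureRequest) -> str:
    return request.param


def test_put_object(object_size: str):
    # Collected twice: test_put_object[simple] and test_put_object[complex].
    assert object_size in ("simple", "complex")
```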
@@ -191,21 +221,9 @@ def ec_placement_policy() -> PlacementPolicy:
     return PlacementPolicy("ec", DEFAULT_EC_PLACEMENT_RULE)


-@pytest.fixture(scope="session")
-@allure.title("Init Frostfs CLI")
-def frostfs_cli(client_shell: Shell, default_wallet: WalletInfo) -> FrostfsCli:
-    return FrostfsCli(client_shell, FROSTFS_CLI_EXEC, default_wallet.config_path)
-
-
-@pytest.fixture(scope="session")
-@allure.title("Init GrpcClientWrapper with local Frostfs CLI")
-def grpc_client(frostfs_cli: FrostfsCli) -> GrpcClientWrapper:
-    return CliClientWrapper(frostfs_cli)
-
-
 # By default we want all tests to be executed with both storage policies.
 # This can be overriden in choosen tests if needed.
-@pytest.fixture(scope="session", params=[pytest.param("rep", marks=pytest.mark.rep), pytest.param("ec", marks=pytest.mark.ec)])
+@pytest.fixture(scope="session", params=[pytest.param("rep", marks=pytest.mark.rep)])
 def placement_policy(
     rep_placement_policy: PlacementPolicy, ec_placement_policy: PlacementPolicy, request: pytest.FixtureRequest
 ) -> PlacementPolicy:
@@ -227,8 +245,8 @@ def cluster(temp_directory: str, hosting: Hosting, client_shell: Shell) -> Cluster:
     yield cluster


-@allure.title("[Session]: Provide S3 policy")
-@pytest.fixture(scope="session")
+@reporter.step("[Class]: Provide S3 policy")
+@pytest.fixture(scope="class")
 def s3_policy(request: pytest.FixtureRequest):
     policy = None
     if "param" in request.__dict__:
@@ -240,22 +258,17 @@ def s3_policy(request: pytest.FixtureRequest):
 @pytest.fixture(scope="session")
 @allure.title("[Session] Create healthcheck object")
 def healthcheck(cluster: Cluster) -> Healthcheck:
-    healthcheck_cls = plugins.load_plugin("frostfs.testlib.healthcheck", cluster.cluster_nodes[0].host.config.healthcheck_plugin_name)
+    healthcheck_cls = plugins.load_plugin(
+        "frostfs.testlib.healthcheck", cluster.cluster_nodes[0].host.config.healthcheck_plugin_name
+    )
     return healthcheck_cls()


 @pytest.fixture(scope="session")
-def cluster_state_controller_session(client_shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> ClusterStateController:
+def cluster_state_controller(client_shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> ClusterStateController:
     controller = ClusterStateController(client_shell, cluster, healthcheck)
-    return controller
-
-
-@pytest.fixture
-def cluster_state_controller(cluster_state_controller_session: ClusterStateController) -> ClusterStateController:
-    yield cluster_state_controller_session
-    cluster_state_controller_session.start_stopped_hosts()
-    cluster_state_controller_session.start_all_stopped_services()
+    yield controller
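The deleted wrapper is worth noting: it paired one session-scoped controller with a function-scoped fixture whose teardown restores cluster state after every test. A sketch of that pattern with the controller stubbed (the real `ClusterStateController` API is not reproduced):

```python
import pytest


class StubController:
    def start_stopped_hosts(self) -> None: ...
    def start_all_stopped_services(self) -> None: ...


@pytest.fixture(scope="session")
def controller_session() -> StubController:
    return StubController()  # built once per session


@pytest.fixture
def controller(controller_session: StubController):
    yield controller_session
    # Teardown runs after every test, passing or failing,
    # so a test that stops hosts cannot poison its successors.
    controller_session.start_stopped_hosts()
    controller_session.start_all_stopped_services()
```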
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
@ -263,12 +276,12 @@ def credentials_provider(cluster: Cluster) -> CredentialsProvider:
return CredentialsProvider(cluster) return CredentialsProvider(cluster)
@allure.title("[Session]: Create S3 client") @reporter.step("[Class]: Create S3 client")
@pytest.fixture( @pytest.fixture(
scope="session", scope="class",
params=[ params=[
pytest.param(AwsCliClient, marks=[pytest.mark.aws, pytest.mark.weekly]), pytest.param(AwsCliClient, marks=pytest.mark.aws),
pytest.param(Boto3ClientWrapper, marks=[pytest.mark.boto3, pytest.mark.nightly]), pytest.param(Boto3ClientWrapper, marks=pytest.mark.boto3),
], ],
) )
def s3_client( def s3_client(
@@ -282,7 +295,9 @@ def s3_client(
     credentials_provider.S3.provide(default_user, node, s3_policy)

     s3_client_cls = request.param
-    client = s3_client_cls(default_user.s3_credentials.access_key, default_user.s3_credentials.secret_key, cluster.default_s3_gate_endpoint)
+    client = s3_client_cls(
+        default_user.s3_credentials.access_key, default_user.s3_credentials.secret_key, cluster.default_s3_gate_endpoint
+    )
     return client
@@ -294,83 +309,66 @@ def versioning_status(request: pytest.FixtureRequest) -> VersioningStatus:
     return VersioningStatus.UNDEFINED


-@allure.title("[Session] Bulk create buckets for tests")
-@pytest.fixture(scope="session")
-def buckets_pool(s3_client: S3ClientWrapper, request: pytest.FixtureRequest):
-    test_buckets: list = []
-
-    s3_client_type = type(s3_client).__name__
-
-    for test in request.session.items:
-        if s3_client_type not in test.name:
-            continue
-
-        if "bucket" in test.fixturenames:
-            test_buckets.append(string_utils.unique_name("bucket-"))
-
-        if "two_buckets" in test.fixturenames:
-            test_buckets.append(string_utils.unique_name("bucket-"))
-            test_buckets.append(string_utils.unique_name("bucket-"))
-
-    if test_buckets:
-        parallel(s3_client.create_bucket, test_buckets)
-
-    return test_buckets
-
-
-@allure.title("[Test] Create bucket")
+@reporter.step("Create/delete bucket")
 @pytest.fixture
-def bucket(buckets_pool: list[str], s3_client: S3ClientWrapper, versioning_status: VersioningStatus):
-    if buckets_pool:
-        bucket_name = buckets_pool.pop()
-    else:
-        bucket_name = s3_client.create_bucket()
+def bucket(s3_client: S3ClientWrapper, versioning_status: VersioningStatus, request: pytest.FixtureRequest):
+    bucket_name = s3_client.create_bucket()

     if versioning_status:
         s3_helper.set_bucket_versioning(s3_client, bucket_name, versioning_status)

-    return bucket_name
+    yield bucket_name
+
+    if "sanity" not in request.config.option.markexpr:
+        s3_helper.delete_bucket_with_objects(s3_client, bucket_name)


-@allure.title("[Test] Create two buckets")
+@reporter.step("Create two buckets")
 @pytest.fixture
-def two_buckets(buckets_pool: list[str], s3_client: S3ClientWrapper) -> list[str]:
-    buckets: list[str] = []
-
-    for _ in range(2):
-        if buckets_pool:
-            buckets.append(buckets_pool.pop())
-        else:
-            buckets.append(s3_client.create_bucket())
-
-    return buckets
+def two_buckets(s3_client: S3ClientWrapper, request: pytest.FixtureRequest):
+    bucket_1 = s3_client.create_bucket()
+    bucket_2 = s3_client.create_bucket()
+
+    yield bucket_1, bucket_2
+
+    if "sanity" not in request.config.option.markexpr:
+        for bucket_name in [bucket_1, bucket_2]:
+            s3_helper.delete_bucket_with_objects(s3_client, bucket_name)
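The removed pool follows a pop-or-create pattern: bucket names are pre-created in bulk once per session, and each test pops one, falling back to a live create only when the pool runs dry. A tiny sketch with the S3 client stubbed (names and the stub are illustrative):

```python
import uuid


class StubS3:
    def create_bucket(self, name: str | None = None) -> str:
        return name or f"bucket-{uuid.uuid4().hex[:8]}"


def take_bucket(pool: list[str], s3: StubS3) -> str:
    # Prefer a pre-created name; create on the fly only if the pool is empty.
    return pool.pop() if pool else s3.create_bucket()


s3 = StubS3()
pool = [s3.create_bucket(f"bucket-{i}") for i in range(2)]
assert take_bucket(pool, s3) == "bucket-1"  # LIFO pop from the pool
assert take_bucket(pool, s3) == "bucket-0"
assert take_bucket(pool, s3).startswith("bucket-")  # pool empty: live create
```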
@allure.title("[Autouse/Session] Collect binary versions") @allure.title("[Autouse/Session] Check binary versions")
@pytest.fixture(scope="session", autouse=True) @pytest.fixture(scope="session", autouse=True)
@run_optionally(optionals.OPTIONAL_AUTOUSE_FIXTURES_ENABLED) def check_binary_versions(hosting: Hosting, client_shell: Shell, request: pytest.FixtureRequest):
def collect_binary_versions(hosting: Hosting, client_shell: Shell, request: pytest.FixtureRequest): local_versions = version_utils.get_local_binaries_versions(client_shell)
remote_versions, exсeptions_remote_binaries_versions = version_utils.get_remote_binaries_versions(hosting)
all_versions = {
**local_versions,
**{binary_name: binary["version"] for binary_name, binary in remote_versions.items()},
}
environment_dir = request.config.getoption("--alluredir") environment_dir = request.config.getoption("--alluredir")
if not environment_dir: if not environment_dir:
return None return None
local_versions = version_utils.get_local_binaries_versions(client_shell)
remote_versions = version_utils.get_remote_binaries_versions(hosting)
remote_versions_keys = list(remote_versions.keys())
all_versions = {
**local_versions,
**{
f"{name}_{remote_versions_keys.index(host) + 1:02d}": version
for host, versions in remote_versions.items()
for name, version in versions.items()
},
}
file_path = f"{environment_dir}/environment.properties" file_path = f"{environment_dir}/environment.properties"
env_utils.save_env_properties(file_path, all_versions) env_utils.save_env_properties(file_path, all_versions)
@reporter.step("Prepare tmp directory")
@pytest.fixture(scope="session")
def temp_directory(configure_testlib):
with reporter.step("Prepare tmp directory"):
full_path = os.path.join(os.getcwd(), ASSETS_DIR)
shutil.rmtree(full_path, ignore_errors=True)
os.mkdir(full_path)
yield full_path
with reporter.step("Remove tmp directory"):
shutil.rmtree(full_path)
@reporter.step("[Autouse/Session] Test session start time") @reporter.step("[Autouse/Session] Test session start time")
@pytest.fixture(scope="session", autouse=True) @pytest.fixture(scope="session", autouse=True)
def session_start_time(configure_testlib): def session_start_time(configure_testlib):
@ -380,20 +378,17 @@ def session_start_time(configure_testlib):
@allure.title("[Autouse/Session] After deploy healthcheck") @allure.title("[Autouse/Session] After deploy healthcheck")
@pytest.fixture(scope="session", autouse=True) @pytest.fixture(scope="session", autouse=True)
@run_optionally(optionals.OPTIONAL_AUTOUSE_FIXTURES_ENABLED)
def after_deploy_healthcheck(cluster: Cluster): def after_deploy_healthcheck(cluster: Cluster):
with reporter.step("Wait for cluster readiness after deploy"): with reporter.step("Wait for cluster readiness after deploy"):
parallel(readiness_on_node, cluster.cluster_nodes) parallel(readiness_on_node, cluster.cluster_nodes)
@pytest.fixture(scope="session")
def rpc_endpoint(cluster: Cluster):
return cluster.default_rpc_endpoint
@wait_for_success(60 * SERVICE_ACTIVE_TIME * 3, 60, title="Wait for {cluster_node} readiness") @wait_for_success(60 * SERVICE_ACTIVE_TIME * 3, 60, title="Wait for {cluster_node} readiness")
def readiness_on_node(cluster_node: ClusterNode): def readiness_on_node(cluster_node: ClusterNode):
if "skip_readiness_check" in cluster_node.host.config.attributes and cluster_node.host.config.attributes["skip_readiness_check"]: if (
"skip_readiness_check" in cluster_node.host.config.attributes
and cluster_node.host.config.attributes["skip_readiness_check"]
):
return return
# TODO: Move to healtcheck classes # TODO: Move to healtcheck classes
@@ -403,7 +398,9 @@ def readiness_on_node(cluster_node: ClusterNode):
         assert "active" == result.stdout.strip(), f"Service {svc_name} should be in active state"

     with reporter.step(f"Check service {svc_name} is active more than {SERVICE_ACTIVE_TIME} minutes"):
-        result = cluster_node.host.get_shell().exec(f"systemctl show {svc_name} --property ActiveEnterTimestamp | cut -d '=' -f 2")
+        result = cluster_node.host.get_shell().exec(
+            f"systemctl show {svc_name} --property ActiveEnterTimestamp | cut -d '=' -f 2"
+        )
         start_time = parser.parse(result.stdout.strip())
         current_time = datetime.now(tz=timezone.utc)
         active_time = current_time - start_time
@@ -416,10 +413,20 @@ def readiness_on_node(cluster_node: ClusterNode):
     ), f"Service should be in active state more than {SERVICE_ACTIVE_TIME} minutes, current {active_minutes}m:{active_seconds}s"


+@allure.title("[Autouse/Test] Run health check for all nodes")
+@pytest.fixture(autouse=True)
+def run_health_check(healthcheck: Healthcheck, cluster: Cluster, request: pytest.FixtureRequest):
+    if request.node.get_closest_marker("no_healthcheck"):
+        # Skip healthcheck for tests marked with no_healthcheck
+        return
+    parallel(healthcheck.storage_healthcheck, cluster.cluster_nodes)
+
+
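The added autouse fixture lets individual tests opt out via a marker. A runnable sketch of the opt-out mechanics with the actual check stubbed (register the marker in pytest.ini to silence warnings):

```python
import pytest


@pytest.fixture(autouse=True)
def run_health_check(request: pytest.FixtureRequest):
    if request.node.get_closest_marker("no_healthcheck"):
        return  # test explicitly opted out of the check
    # ... run the real per-node storage health check here ...


@pytest.mark.no_healthcheck
def test_destructive_scenario():
    pass  # the autouse fixture short-circuits for this test only
```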
@reporter.step("Prepare default user with wallet") @reporter.step("Prepare default user with wallet")
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def default_user(credentials_provider: CredentialsProvider, cluster: Cluster) -> User: def default_user(credentials_provider: CredentialsProvider, cluster: Cluster) -> User:
user = User(string_utils.unique_name("user-")) # always unique username
user = User(f"user_{hex(int(datetime.now().timestamp() * 1000000))}")
node = cluster.cluster_nodes[0] node = cluster.cluster_nodes[0]
credentials_provider.GRPC.provide(user, node) credentials_provider.GRPC.provide(user, node)
@@ -433,39 +440,9 @@ def default_wallet(default_user: User) -> WalletInfo:
     return default_user.wallet


-@pytest.fixture(scope="session")
-def wallets_pool(credentials_provider: CredentialsProvider, cluster: Cluster) -> list[WalletInfo]:
-    users = [User(string_utils.unique_name("user-")) for _ in range(WALLTETS_IN_POOL)]
-    parallel(credentials_provider.GRPC.provide, users, cluster_node=cluster.cluster_nodes[0])
-    return [user.wallet for user in users]
-
-
-@pytest.fixture(scope="session")
-def other_wallet(wallets_pool: list[WalletInfo]) -> WalletInfo:
-    if not wallets_pool:
-        raise RuntimeError("[other_wallet] No wallets in pool. Consider increasing WALLTETS_IN_POOL or review.")
-    return wallets_pool.pop()
-
-
-@pytest.fixture(scope="session")
-def other_wallet_2(wallets_pool: list[WalletInfo]) -> WalletInfo:
-    if not wallets_pool:
-        raise RuntimeError("[other_wallet2] No wallets in pool. Consider increasing WALLTETS_IN_POOL or review.")
-    return wallets_pool.pop()
-
-
 @pytest.fixture()
 @allure.title("Select random node for testing")
 def node_under_test(cluster: Cluster) -> ClusterNode:
     selected_node = random.choice(cluster.cluster_nodes)
     reporter.attach(f"{selected_node}", "Selected node")
     return selected_node
-
-
-@allure.title("Init bucket container resolver")
-@pytest.fixture()
-def bucket_container_resolver(node_under_test: ClusterNode) -> BucketContainerResolver:
-    resolver_cls = plugins.load_plugin("frostfs.testlib.bucket_cid_resolver", node_under_test.host.config.product)
-    resolver: BucketContainerResolver = resolver_cls()
-    return resolver
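Both the healthcheck fixture above and this deleted resolver resolve an implementation class by name from a plugin group. A hedged sketch of that lookup; this generic helper is an assumption, not the testlib's `plugins.load_plugin` implementation:

```python
from importlib.metadata import entry_points


def load_plugin(plugin_group: str, name: str):
    # Scan the group and return the first entry point whose name matches.
    for ep in entry_points(group=plugin_group):
        if ep.name == name:
            return ep.load()
    raise ValueError(f"No plugin {name!r} registered in group {plugin_group!r}")


# Usage mirroring the deleted fixture (identifiers from the diff):
# resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", product)
# resolver = resolver_cls()
```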
View file
@@ -13,17 +13,18 @@ from frostfs_testlib.steps.cli.container import (
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase

-from ...helpers.utility import placement_policy_from_container
+from pytest_tests.helpers.utility import placement_policy_from_container


-@pytest.mark.nightly
-@pytest.mark.sanity
 @pytest.mark.container
+@pytest.mark.sanity
 class TestContainer(ClusterTestBase):
-    @allure.title("Create container (name={name})")
     @pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"])
     @pytest.mark.smoke
     def test_container_creation(self, default_wallet: WalletInfo, name: str):
+        scenario_title = "with name" if name else "without name"
+        allure.dynamic.title(f"Create container {scenario_title}")
+
         wallet = default_wallet

         placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
@@ -58,7 +59,9 @@ class TestContainer(ClusterTestBase):
         with reporter.step("Check container has correct information"):
             expected_policy = placement_rule.casefold()
             actual_policy = placement_policy_from_container(container_info)
-            assert actual_policy == expected_policy, f"Expected policy\n{expected_policy} but got policy\n{actual_policy}"
+            assert (
+                actual_policy == expected_policy
+            ), f"Expected policy\n{expected_policy} but got policy\n{actual_policy}"

             for info in info_to_check:
                 expected_info = info.casefold()
@@ -109,6 +112,10 @@
         with reporter.step("Delete containers and check they were deleted"):
             for cid in cids:
-                delete_container(wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, await_mode=True)
-            containers_list = list_containers(wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
+                delete_container(
+                    wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, await_mode=True
+                )
+            containers_list = list_containers(
+                wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
+            )
             assert cid not in containers_list, "Container not deleted"
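The test above now computes its report title at runtime instead of using a parametrized `@allure.title` template. A runnable sketch of that pattern (requires allure-pytest; the body is elided):

```python
import allure
import pytest


@pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"])
def test_container_creation(name: str):
    scenario_title = "with name" if name else "without name"
    # Set the Allure title once the parameter value is known.
    allure.dynamic.title(f"Create container {scenario_title}")
    # ... create the container and assert on it ...
```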
View file
@@ -16,11 +16,15 @@ from frostfs_testlib.testing.test_control import wait_for_success
 from frostfs_testlib.utils.cli_utils import parse_netmap_output
 from frostfs_testlib.utils.file_utils import generate_file

-from ...helpers.utility import placement_policy_from_container
-from ...resources.policy_error_patterns import NOT_ENOUGH_TO_SELECT, NOT_FOUND_FILTER, NOT_FOUND_SELECTOR, NOT_PARSE_POLICY
+from pytest_tests.helpers.utility import placement_policy_from_container
+from pytest_tests.resources.policy_error_patterns import (
+    NOT_ENOUGH_TO_SELECT,
+    NOT_FOUND_FILTER,
+    NOT_FOUND_SELECTOR,
+    NOT_PARSE_POLICY,
+)


-@pytest.mark.nightly
 @pytest.mark.container
 @pytest.mark.policy
 class TestPolicy(ClusterTestBase):
@@ -35,10 +39,10 @@ class TestPolicy(ClusterTestBase):
         return True

     @pytest.fixture(scope="module")
-    def fill_field_price(self, cluster_state_controller_session: ClusterStateController):
+    def fill_field_price(self, cluster_state_controller: ClusterStateController):
         prices = ["15", "10", "65", "55"]
-        config_manager = cluster_state_controller_session.manager(ConfigStateManager)
+        config_manager = cluster_state_controller.manager(ConfigStateManager)
         for i in zip(self.cluster.cluster_nodes, prices):
             config_manager.set_on_node(i[0], StorageNode, {"node:attribute_5": f"Price:{i[1]}"})

@@ -47,7 +51,7 @@ class TestPolicy(ClusterTestBase):
         yield

-        cluster_state_controller_session.manager(ConfigStateManager).revert_all()
+        cluster_state_controller.manager(ConfigStateManager).revert_all()

     @allure.title("[NEGATIVE] Placement policy: Can't parse placement policy")
     def test_placement_policy_negative(self, default_wallet):
@@ -89,9 +93,7 @@ class TestPolicy(ClusterTestBase):
         """
         Negative test for placement policy: Filter not found.
         """
-        placement_rule = (
-            "REP 2 IN HALF CBF 1 SELECT 2 FROM GT15 AS HALF FILTER @NOTRU AND Price GT 15 AS GT15 FILTER CountryCode NE RU AS NOTRU"
-        )
+        placement_rule = "REP 2 IN HALF CBF 1 SELECT 2 FROM GT15 AS HALF FILTER @NOTRU AND Price GT 15 AS GT15 FILTER CountryCode NE RU AS NOTRU"
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
             with pytest.raises(Exception, match=NOT_FOUND_FILTER):
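These negative tests rely on `pytest.raises(match=...)`, which treats the pattern as a regex searched in the exception message. A self-contained sketch of that assertion style; the error text and stub function here are illustrative, not the suite's real patterns:

```python
import re

import pytest

NOT_PARSE_POLICY = "can't parse placement policy"  # hypothetical pattern


def create_container(rule: str) -> str:
    # Stub that always rejects the rule, standing in for the real call.
    raise RuntimeError(f"{NOT_PARSE_POLICY}: {rule!r}")


def test_placement_policy_negative():
    # re.escape keeps regex metacharacters in the pattern literal.
    with pytest.raises(Exception, match=re.escape(NOT_PARSE_POLICY)):
        create_container("UNPARSEABLE RULE")
```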
@@ -135,7 +137,9 @@ class TestPolicy(ClusterTestBase):
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -145,7 +149,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Delete the object from the container"):
             delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=endpoint)
@@ -168,7 +174,9 @@ class TestPolicy(ClusterTestBase):
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -178,7 +186,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Delete the object from the container"):
             delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=endpoint)
@@ -202,7 +212,9 @@ class TestPolicy(ClusterTestBase):
         placement_params = {"country": "Russia"}

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -212,7 +224,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Check the object appearance"):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
@@ -245,7 +259,9 @@ class TestPolicy(ClusterTestBase):
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -255,7 +271,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Check the object appearance"):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
@@ -291,7 +309,9 @@ class TestPolicy(ClusterTestBase):
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -301,7 +321,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Check the object appearance"):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
@@ -337,7 +359,9 @@ class TestPolicy(ClusterTestBase):
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -347,7 +371,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Check the object appearance"):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
@@ -380,7 +406,9 @@ class TestPolicy(ClusterTestBase):
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -390,7 +418,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Delete the object from the container"):
             delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=endpoint)
@@ -413,7 +443,9 @@ class TestPolicy(ClusterTestBase):
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -423,7 +455,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Delete the object from the container"):
             delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=endpoint)
@@ -448,7 +482,9 @@ class TestPolicy(ClusterTestBase):
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -458,7 +494,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Check the object appearance"):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
@@ -492,7 +530,9 @@ class TestPolicy(ClusterTestBase):
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -502,7 +542,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Check the object appearance"):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
@@ -538,7 +580,9 @@ class TestPolicy(ClusterTestBase):
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -548,7 +592,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Check the object appearance"):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
@@ -582,7 +628,9 @@ class TestPolicy(ClusterTestBase):
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -592,7 +640,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Delete the object from the container"):
             delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=endpoint)
@@ -615,7 +665,9 @@ class TestPolicy(ClusterTestBase):
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -625,7 +677,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Delete the object from the container"):
             delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=endpoint)
@@ -650,7 +704,9 @@ class TestPolicy(ClusterTestBase):
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -660,7 +716,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Check the object appearance"):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
@@ -688,16 +746,16 @@
         """
         This test checks object's copies based on container's placement policy with SELECT and Complex FILTER results with 50% of available nodes.
         """
-        placement_rule = (
-            "REP 2 IN HALF CBF 2 SELECT 2 FROM GE15 AS HALF FILTER CountryCode NE RU AS NOTRU FILTER @NOTRU AND Price GE 15 AS GE15"
-        )
+        placement_rule = "REP 2 IN HALF CBF 2 SELECT 2 FROM GE15 AS HALF FILTER CountryCode NE RU AS NOTRU FILTER @NOTRU AND Price GE 15 AS GE15"
         placement_params = {"Price": 15, "country_code": "RU"}
         file_path = generate_file(simple_object_size.value)
         expected_copies = 2
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -707,12 +765,16 @@
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Check the object appearance"):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
             netmap = self.get_netmap_param(netmap)
-            with reporter.step(f"Check two nodes are selected not with country code '{placement_params['country_code']}'"):
+            with reporter.step(
+                f"Check two nodes are selected not with country code '{placement_params['country_code']}'"
+            ):
                 for node in resulting_copies:
                     node_address = node.get_rpc_endpoint().split(":")[0]
                     assert (
@@ -744,7 +806,9 @@ class TestPolicy(ClusterTestBase):
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -754,7 +818,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Check the object appearance"):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
@@ -796,7 +862,9 @@ class TestPolicy(ClusterTestBase):
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -806,7 +874,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Delete the object from the container"):
             delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=endpoint)
@@ -829,7 +899,9 @@ class TestPolicy(ClusterTestBase):
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -839,7 +911,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Delete the object from the container"):
             delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=endpoint)
@@ -864,7 +938,9 @@ class TestPolicy(ClusterTestBase):
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -874,7 +950,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Check the object appearance"):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
@@ -902,16 +980,16 @@
         """
        This test checks object's copies based on container's placement policy with SELECT and Complex FILTER results with 75% of available nodes.
         """
-        placement_rule = (
-            "REP 2 IN NODES75 SELECT 2 FROM LT65 AS NODES75 FILTER Continent NE America AS NOAM FILTER @NOAM AND Price LT 65 AS LT65"
-        )
+        placement_rule = "REP 2 IN NODES75 SELECT 2 FROM LT65 AS NODES75 FILTER Continent NE America AS NOAM FILTER @NOAM AND Price LT 65 AS LT65"
         placement_params = {"Price": 65, "continent": "America"}
         file_path = generate_file(simple_object_size.value)
         expected_copies = 2
         endpoint = self.cluster.default_rpc_endpoint

         with reporter.step(f"Create container with policy {placement_rule}"):
-            cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint)
+            cid = create_container(
+                wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
+            )

         with reporter.step(f"Check container policy"):
             self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)

@@ -921,7 +999,9 @@ class TestPolicy(ClusterTestBase):
         with reporter.step(f"Check object expected copies"):
             resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
-            assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
+            assert (
+                len(resulting_copies) == expected_copies
+            ), f"Expected {expected_copies} copies, got {len(resulting_copies)}"

         with reporter.step(f"Check the object appearance"):
             netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
@ -959,7 +1039,9 @@ class TestPolicy(ClusterTestBase):
endpoint = self.cluster.default_rpc_endpoint endpoint = self.cluster.default_rpc_endpoint
with reporter.step(f"Create container with policy {placement_rule}"): with reporter.step(f"Create container with policy {placement_rule}"):
cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint) cid = create_container(
wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
)
with reporter.step(f"Check container policy"): with reporter.step(f"Check container policy"):
self.validate_object_policy(default_wallet, placement_rule, cid, endpoint) self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)
@ -969,7 +1051,9 @@ class TestPolicy(ClusterTestBase):
with reporter.step(f"Check object expected copies"): with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes) resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}" assert (
len(resulting_copies) == expected_copies
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"): with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell)) netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
@ -1010,7 +1094,9 @@ class TestPolicy(ClusterTestBase):
endpoint = self.cluster.default_rpc_endpoint endpoint = self.cluster.default_rpc_endpoint
with reporter.step(f"Create container with policy {placement_rule}"): with reporter.step(f"Create container with policy {placement_rule}"):
cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint) cid = create_container(
wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
)
with reporter.step(f"Check container policy"): with reporter.step(f"Check container policy"):
self.validate_object_policy(default_wallet, placement_rule, cid, endpoint) self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)
@ -1020,7 +1106,9 @@ class TestPolicy(ClusterTestBase):
with reporter.step(f"Check object expected copies"): with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes) resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}" assert (
len(resulting_copies) == expected_copies
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Delete the object from the container"): with reporter.step(f"Delete the object from the container"):
delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=endpoint) delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=endpoint)
@ -1043,7 +1131,9 @@ class TestPolicy(ClusterTestBase):
endpoint = self.cluster.default_rpc_endpoint endpoint = self.cluster.default_rpc_endpoint
with reporter.step(f"Create container with policy {placement_rule}"): with reporter.step(f"Create container with policy {placement_rule}"):
cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint) cid = create_container(
wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
)
with reporter.step(f"Check container policy"): with reporter.step(f"Check container policy"):
self.validate_object_policy(default_wallet, placement_rule, cid, endpoint) self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)
@ -1053,7 +1143,9 @@ class TestPolicy(ClusterTestBase):
with reporter.step(f"Check object expected copies"): with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes) resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}" assert (
len(resulting_copies) == expected_copies
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Delete the object from the container"): with reporter.step(f"Delete the object from the container"):
delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=endpoint) delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=endpoint)
@ -1078,7 +1170,9 @@ class TestPolicy(ClusterTestBase):
endpoint = self.cluster.default_rpc_endpoint endpoint = self.cluster.default_rpc_endpoint
with reporter.step(f"Create container with policy {placement_rule}"): with reporter.step(f"Create container with policy {placement_rule}"):
cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint) cid = create_container(
wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
)
with reporter.step(f"Check container policy"): with reporter.step(f"Check container policy"):
self.validate_object_policy(default_wallet, placement_rule, cid, endpoint) self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)
@ -1088,7 +1182,9 @@ class TestPolicy(ClusterTestBase):
with reporter.step(f"Check object expected copies"): with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes) resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}" assert (
len(resulting_copies) == expected_copies
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"): with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell)) netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
@ -1121,7 +1217,9 @@ class TestPolicy(ClusterTestBase):
endpoint = self.cluster.default_rpc_endpoint endpoint = self.cluster.default_rpc_endpoint
with reporter.step(f"Create container with policy {placement_rule}"): with reporter.step(f"Create container with policy {placement_rule}"):
cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint) cid = create_container(
wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
)
with reporter.step(f"Check container policy"): with reporter.step(f"Check container policy"):
self.validate_object_policy(default_wallet, placement_rule, cid, endpoint) self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)
@ -1131,7 +1229,9 @@ class TestPolicy(ClusterTestBase):
with reporter.step(f"Check object expected copies"): with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes) resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}" assert (
len(resulting_copies) == expected_copies
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"): with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell)) netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
@ -1168,7 +1268,9 @@ class TestPolicy(ClusterTestBase):
endpoint = self.cluster.default_rpc_endpoint endpoint = self.cluster.default_rpc_endpoint
with reporter.step(f"Create container with policy {placement_rule}"): with reporter.step(f"Create container with policy {placement_rule}"):
cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint) cid = create_container(
wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
)
with reporter.step(f"Check container policy"): with reporter.step(f"Check container policy"):
self.validate_object_policy(default_wallet, placement_rule, cid, endpoint) self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)
@ -1178,7 +1280,9 @@ class TestPolicy(ClusterTestBase):
with reporter.step(f"Check object expected copies"): with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes) resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}" assert (
len(resulting_copies) == expected_copies
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"): with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell)) netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
@ -1211,7 +1315,9 @@ class TestPolicy(ClusterTestBase):
endpoint = self.cluster.default_rpc_endpoint endpoint = self.cluster.default_rpc_endpoint
with reporter.step(f"Create container with policy {placement_rule}"): with reporter.step(f"Create container with policy {placement_rule}"):
cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint) cid = create_container(
wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
)
with reporter.step(f"Check container policy"): with reporter.step(f"Check container policy"):
self.validate_object_policy(default_wallet, placement_rule, cid, endpoint) self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)
@ -1221,7 +1327,9 @@ class TestPolicy(ClusterTestBase):
with reporter.step(f"Check object expected copies"): with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes) resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}" assert (
len(resulting_copies) == expected_copies
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Delete the object from the container"): with reporter.step(f"Delete the object from the container"):
delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=endpoint) delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=endpoint)
@ -1244,7 +1352,9 @@ class TestPolicy(ClusterTestBase):
endpoint = self.cluster.default_rpc_endpoint endpoint = self.cluster.default_rpc_endpoint
with reporter.step(f"Create container with policy {placement_rule}"): with reporter.step(f"Create container with policy {placement_rule}"):
cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint) cid = create_container(
wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
)
with reporter.step(f"Check container policy"): with reporter.step(f"Check container policy"):
self.validate_object_policy(default_wallet, placement_rule, cid, endpoint) self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)
@ -1254,7 +1364,9 @@ class TestPolicy(ClusterTestBase):
with reporter.step(f"Check object expected copies"): with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes) resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}" assert (
len(resulting_copies) == expected_copies
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Delete the object from the container"): with reporter.step(f"Delete the object from the container"):
delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=endpoint) delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=endpoint)
@ -1278,7 +1390,9 @@ class TestPolicy(ClusterTestBase):
endpoint = self.cluster.default_rpc_endpoint endpoint = self.cluster.default_rpc_endpoint
with reporter.step(f"Create container with policy {placement_rule}"): with reporter.step(f"Create container with policy {placement_rule}"):
cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint) cid = create_container(
wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
)
with reporter.step(f"Check container policy"): with reporter.step(f"Check container policy"):
self.validate_object_policy(default_wallet, placement_rule, cid, endpoint) self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)
@ -1288,7 +1402,9 @@ class TestPolicy(ClusterTestBase):
with reporter.step(f"Check object expected copies"): with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes) resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}" assert (
len(resulting_copies) == expected_copies
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"): with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell)) netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
@ -1324,7 +1440,9 @@ class TestPolicy(ClusterTestBase):
endpoint = self.cluster.default_rpc_endpoint endpoint = self.cluster.default_rpc_endpoint
with reporter.step(f"Create container with policy {placement_rule}"): with reporter.step(f"Create container with policy {placement_rule}"):
cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint) cid = create_container(
wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
)
with reporter.step(f"Check container policy"): with reporter.step(f"Check container policy"):
self.validate_object_policy(default_wallet, placement_rule, cid, endpoint) self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)
@ -1334,7 +1452,9 @@ class TestPolicy(ClusterTestBase):
with reporter.step(f"Check object expected copies"): with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes) resulting_copies = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}" assert (
len(resulting_copies) == expected_copies
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"): with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell)) netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
@ -1370,7 +1490,9 @@ class TestPolicy(ClusterTestBase):
endpoint = self.cluster.default_rpc_endpoint endpoint = self.cluster.default_rpc_endpoint
with reporter.step(f"Create container with policy {placement_rule}"): with reporter.step(f"Create container with policy {placement_rule}"):
cid = create_container(wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint) cid = create_container(
wallet=default_wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
)
with reporter.step(f"Check container policy"): with reporter.step(f"Check container policy"):
self.validate_object_policy(default_wallet, placement_rule, cid, endpoint) self.validate_object_policy(default_wallet, placement_rule, cid, endpoint)
@ -1406,7 +1528,9 @@ class TestPolicy(ClusterTestBase):
delete_container(wallet=default_wallet, cid=cid, shell=self.shell, endpoint=endpoint) delete_container(wallet=default_wallet, cid=cid, shell=self.shell, endpoint=endpoint)
def validate_object_policy(self, wallet: str, placement_rule: str, cid: str, endpoint: str): def validate_object_policy(self, wallet: str, placement_rule: str, cid: str, endpoint: str):
got_policy = placement_policy_from_container(get_container(wallet, cid, json_mode=False, shell=self.shell, endpoint=endpoint)) got_policy = placement_policy_from_container(
get_container(wallet, cid, json_mode=False, shell=self.shell, endpoint=endpoint)
)
assert got_policy.replace("'", "") == placement_rule.replace( assert got_policy.replace("'", "") == placement_rule.replace(
"'", "" "'", ""
), f"Expected \n{placement_rule} and got policy \n{got_policy} are the same" ), f"Expected \n{placement_rule} and got policy \n{got_policy} are the same"

View file

@@ -6,7 +6,7 @@ import pytest
 from frostfs_testlib.storage.cluster import ClusterNode
 from frostfs_testlib.storage.controllers import ShardsWatcher
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
-from frostfs_testlib.utils.file_utils import TestFile, generate_file
+from frostfs_testlib.utils.file_utils import generate_file

 @pytest.fixture()
@@ -25,6 +25,7 @@ def test_start_time() -> datetime:
 @pytest.fixture()
 @allure.title("Generate simple size file")
-def simple_file(simple_object_size: ObjectSize) -> TestFile:
+def simple_file(simple_object_size: ObjectSize) -> str:
     path_file = generate_file(size=simple_object_size.value)
-    return path_file
+    yield path_file
+    os.remove(path_file)
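
Note: swapping `return` for `yield` turns the fixture into a generator, so everything after the yield runs as teardown and the generated file is removed. Two review points: this conftest now needs an `import os` (not visible in the hunk), and the `-> str` annotation is now strictly an iterator of str. A self-contained illustration of the pattern, using only the standard library (names here are illustrative, not from the testlib):

import os
import tempfile

import pytest

@pytest.fixture()
def temp_file() -> str:
    # setup: create the file the test will use
    fd, path = tempfile.mkstemp()
    os.close(fd)
    yield path  # the test body runs at this point
    # teardown: runs even if the test failed
    os.remove(path)

def test_file_exists(temp_file: str):
    assert os.path.exists(temp_file)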

View file

@@ -11,7 +11,7 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils import datetime_utils

-@pytest.mark.order(20)
+@pytest.mark.time
 @pytest.mark.failover
 class TestTime(ClusterTestBase):
     @reporter.step("Neo-go should continue to release blocks")

View file

@@ -1,4 +1,5 @@
 import logging
+import os
 import random
 from time import sleep

@@ -8,7 +9,13 @@ from frostfs_testlib import reporter
 from frostfs_testlib.healthcheck.interfaces import Healthcheck
 from frostfs_testlib.resources.wellknown_acl import EACL_PUBLIC_READ_WRITE, PUBLIC_ACL
 from frostfs_testlib.steps.cli.container import create_container
-from frostfs_testlib.steps.cli.object import get_object, get_object_nodes, neo_go_query_height, put_object, put_object_to_random_node
+from frostfs_testlib.steps.cli.object import (
+    get_object,
+    get_object_nodes,
+    neo_go_query_height,
+    put_object,
+    put_object_to_random_node,
+)
 from frostfs_testlib.steps.storage_object import delete_objects
 from frostfs_testlib.storage.cluster import ClusterNode
 from frostfs_testlib.storage.controllers import ClusterStateController

@@ -25,6 +32,7 @@ STORAGE_NODE_COMMUNICATION_PORT = "8080"
 STORAGE_NODE_COMMUNICATION_PORT_TLS = "8082"
 PORTS_TO_BLOCK = [STORAGE_NODE_COMMUNICATION_PORT, STORAGE_NODE_COMMUNICATION_PORT_TLS]
 blocked_nodes: list[ClusterNode] = []
+file_wait_delete = []

 OBJECT_ATTRIBUTES = [
     None,

@@ -55,6 +63,14 @@ class TestFailoverNetwork(ClusterTestBase):
         yield
         cluster_state_controller.restore_interfaces()

+    @pytest.fixture()
+    @allure.title("Delete file after test")
+    def delete_file_after_test(self) -> None:
+        yield
+        for path in file_wait_delete:
+            os.remove(path)
+        file_wait_delete.clear()
+
     @pytest.fixture()
     def storage_objects(
         self,

@@ -97,7 +113,9 @@ class TestFailoverNetwork(ClusterTestBase):
             storage_objects.append(storage_object)

-        return storage_objects
+        yield storage_objects
+
+        delete_objects(storage_objects, self.shell, self.cluster)

     @allure.title("Block Storage node traffic")
     def test_block_storage_node_traffic(

@@ -156,7 +174,9 @@ class TestFailoverNetwork(ClusterTestBase):
             assert node.storage_node not in new_nodes

         with reporter.step("Check object data is not corrupted"):
-            got_file_path = get_object(wallet, cid, oid, endpoint=new_nodes[0].get_rpc_endpoint(), shell=self.shell)
+            got_file_path = get_object(
+                wallet, cid, oid, endpoint=new_nodes[0].get_rpc_endpoint(), shell=self.shell
+            )
             assert get_file_hash(source_file_path) == get_file_hash(got_file_path)

         with reporter.step(f"Unblock incoming traffic"):
@@ -164,7 +184,9 @@ class TestFailoverNetwork(ClusterTestBase):
             with reporter.step(f"Unblock at host {node}"):
                 cluster_state_controller.restore_traffic(node=node)
                 block_node = [
-                    cluster_node for cluster_node in self.cluster.cluster_nodes if cluster_node.storage_node == node.storage_node
+                    cluster_node
+                    for cluster_node in self.cluster.cluster_nodes
+                    if cluster_node.storage_node == node.storage_node
                 ]
                 blocked_nodes.remove(*block_node)
         sleep(wakeup_node_timeout)

@@ -182,6 +204,7 @@ class TestFailoverNetwork(ClusterTestBase):
         cluster_state_controller: ClusterStateController,
         default_wallet: WalletInfo,
         restore_down_interfaces: None,
+        delete_file_after_test: None,
         storage_objects: list[StorageObjectInfo],
     ):
         storage_object = storage_objects[0]

@@ -208,7 +231,7 @@ class TestFailoverNetwork(ClusterTestBase):
         self.tick_epochs(1, alive_node=nodes_without_an_object[0].storage_node, wait_block=2)

         with reporter.step("Get object for target nodes to data interfaces, expect false"):
-            with pytest.raises(RuntimeError, match="can't create API client: can't init SDK client: gRPC dial: context deadline exceeded"):
+            with pytest.raises(RuntimeError, match="return code: 1"):
                 get_object(
                     wallet=default_wallet,
                     cid=storage_object.cid,

@@ -225,6 +248,7 @@ class TestFailoverNetwork(ClusterTestBase):
                 shell=self.shell,
                 endpoint=nodes_without_an_object[0].storage_node.get_rpc_endpoint(),
             )
+            file_wait_delete.append(input_file)

         with reporter.step("Restore interface and tick 1 epoch, wait 2 block"):
             cluster_state_controller.restore_interfaces()

@@ -237,6 +261,7 @@ class TestFailoverNetwork(ClusterTestBase):
         cluster_state_controller: ClusterStateController,
         default_wallet: WalletInfo,
         restore_down_interfaces: None,
+        delete_file_after_test: None,
         storage_objects: list[StorageObjectInfo],
         simple_object_size: ObjectSize,
     ):

@@ -264,7 +289,7 @@ class TestFailoverNetwork(ClusterTestBase):
         self.tick_epochs(1, alive_node=nodes_without_an_object[0].storage_node, wait_block=2)

         with reporter.step("Get object others node, expect false"):
-            with pytest.raises(RuntimeError, match="rpc error"):
+            with pytest.raises(RuntimeError, match="return code: 1"):
                 get_object(
                     wallet=default_wallet,
                     cid=storage_object.cid,

@@ -274,7 +299,7 @@ class TestFailoverNetwork(ClusterTestBase):
             )

         with reporter.step("Put object, others node, expect false"):
-            with pytest.raises(RuntimeError, match="rpc error"):
+            with pytest.raises(RuntimeError, match="return code: 1"):
                 put_object(
                     wallet=default_wallet,
                     path=storage_object.file_path,

@@ -291,6 +316,7 @@ class TestFailoverNetwork(ClusterTestBase):
                 shell=self.shell,
                 endpoint=nodes_with_object[0].storage_node.get_rpc_endpoint(),
             )
+            file_wait_delete.append(input_file)

         with reporter.step(f"Put object nodes with object, expect true"):
             temp_file_path = generate_file(simple_object_size.value)

@@ -301,7 +327,7 @@ class TestFailoverNetwork(ClusterTestBase):
                 shell=self.shell,
                 endpoint=nodes_with_object[0].storage_node.get_rpc_endpoint(),
             )
+            file_wait_delete.append(temp_file_path)

         with reporter.step("Restore interface and tick 1 epoch, wait 2 block"):
             cluster_state_controller.restore_interfaces()
             self.tick_epochs(1, alive_node=nodes_without_an_object[0].storage_node, wait_block=2)

@@ -319,6 +345,7 @@ class TestFailoverNetwork(ClusterTestBase):
         cluster_state_controller: ClusterStateController,
         default_wallet: WalletInfo,
         simple_object_size: ObjectSize,
+        delete_file_after_test: None,
         restore_down_interfaces: None,
         block_interface: Interfaces,
         other_interface: Interfaces,

@@ -340,6 +367,7 @@ class TestFailoverNetwork(ClusterTestBase):
         with reporter.step("Put object"):
             file_path = generate_file(simple_object_size.value)
+            file_wait_delete.append(file_path)

             oid = put_object(
                 wallet=default_wallet,

@@ -357,6 +385,7 @@ class TestFailoverNetwork(ClusterTestBase):
                 shell=self.shell,
                 endpoint=f"{cluster_nodes[0].get_data_interface(other_interface.value)[0]}:8080",
             )
+            file_wait_delete.append(file_get_path)

         with reporter.step("Restore interfaces all nodes"):
             cluster_state_controller.restore_interfaces()

@@ -372,6 +401,7 @@ class TestFailoverNetwork(ClusterTestBase):
         cluster_state_controller: ClusterStateController,
         default_wallet: WalletInfo,
         simple_object_size: ObjectSize,
+        delete_file_after_test: None,
         restore_down_interfaces: None,
         interface: Interfaces,
     ):

@@ -400,6 +430,7 @@ class TestFailoverNetwork(ClusterTestBase):
         with reporter.step(f"Put object, after down {interface}"):
             file_path = generate_file(simple_object_size.value)
+            file_wait_delete.append(file_path)

             oid = put_object(
                 wallet=default_wallet,

@@ -417,6 +448,7 @@ class TestFailoverNetwork(ClusterTestBase):
                 shell=self.shell,
                 endpoint=self.cluster.default_rpc_endpoint,
             )
+            file_wait_delete.append(file_get_path)

         now_block = {}
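
Note: the additions above introduce a module-level `file_wait_delete` registry that tests append generated and downloaded file paths to, plus a `delete_file_after_test` fixture that sweeps the registry on teardown. A self-contained sketch of the same pattern; the `contextlib.suppress` guard is an addition here (not in the diff) so a test that failed before creating its file does not also fail in teardown:

import contextlib
import os

import pytest

# module-level registry shared by the tests in the file
files_to_delete: list[str] = []

@pytest.fixture()
def delete_files_after_test() -> None:
    yield
    # teardown: remove every path the test registered
    for path in files_to_delete:
        with contextlib.suppress(FileNotFoundError):
            os.remove(path)
    files_to_delete.clear()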

View file

@@ -1,6 +1,5 @@
-import itertools
 import logging
-import os
+import os.path
 import random

 import allure
@@ -16,8 +15,9 @@ from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
-from frostfs_testlib.testing.parallel import parallel, parallel_workers_limit
+from frostfs_testlib.testing.parallel import parallel
 from frostfs_testlib.testing.test_control import wait_for_success
+from frostfs_testlib.utils.failover_utils import wait_object_replication
 from frostfs_testlib.utils.file_utils import get_file_hash
 from pytest import FixtureRequest

@@ -35,7 +35,7 @@ class TestFailoverServer(ClusterTestBase):
     def wait_node_in_map(self, *args, **kwargs):
         check_node_in_map(*args, **kwargs)

-    @allure.title("[Test] Create containers")
+    @reporter.step("Create {count_containers} containers and {count_files} objects")
     @pytest.fixture
     def containers(
         self,
@@ -45,23 +45,22 @@ class TestFailoverServer(ClusterTestBase):
         placement_rule = "REP 2 CBF 2 SELECT 2 FROM *"
-        containers_count = request.param
-        results = parallel(
-            [create_container for _ in range(containers_count)],
-            wallet=default_wallet,
-            shell=self.shell,
-            endpoint=self.cluster.default_rpc_endpoint,
-            rule=placement_rule,
-            basic_acl=PUBLIC_ACL,
-        )
-
-        containers = [
-            StorageContainer(StorageContainerInfo(result.result(), default_wallet), self.shell, self.cluster) for result in results
-        ]
+        containers = []
+
+        for _ in range(request.param):
+            cont_id = create_container(
+                default_wallet,
+                shell=self.shell,
+                endpoint=self.cluster.default_rpc_endpoint,
+                rule=placement_rule,
+                basic_acl=PUBLIC_ACL,
+            )
+            storage_cont_info = StorageContainerInfo(cont_id, default_wallet)
+            containers.append(StorageContainer(storage_cont_info, self.shell, self.cluster))

         return containers

-    @allure.title("[Test] Create container")
+    @reporter.step("Creation container")
     @pytest.fixture()
     def container(self, default_wallet: WalletInfo) -> StorageContainer:
         select = len(self.cluster.cluster_nodes)
@@ -76,7 +75,7 @@ class TestFailoverServer(ClusterTestBase):
         storage_cont_info = StorageContainerInfo(cont_id, default_wallet)
         return StorageContainer(storage_cont_info, self.shell, self.cluster)

-    @allure.title("[Class] Create objects")
+    @reporter.step("Create object and delete after test")
     @pytest.fixture(scope="class")
     def storage_objects(
         self,
@@ -84,32 +83,38 @@ class TestFailoverServer(ClusterTestBase):
         containers: list[StorageContainer],
         simple_object_size: ObjectSize,
         complex_object_size: ObjectSize,
-    ) -> list[StorageObjectInfo]:
-        object_count = request.param
-        sizes_samples = [simple_object_size, complex_object_size]
-        samples_count = len(sizes_samples)
-        assert object_count >= samples_count, f"Object count is too low, must be >= {samples_count}"
-
-        sizes_weights = [2, 1]
-        sizes = sizes_samples + random.choices(sizes_samples, weights=sizes_weights, k=object_count - samples_count)
-
-        results = parallel(
-            [container.generate_object for _ in sizes for container in containers],
-            size=itertools.cycle([size.value for size in sizes]),
-        )
-
-        return [result.result() for result in results]
-
-    @allure.title("[Test] Create objects and get nodes with object")
-    @pytest.fixture()
-    def object_and_nodes(self, simple_object_size: ObjectSize, container: StorageContainer) -> tuple[StorageObjectInfo, list[ClusterNode]]:
-        object_info = container.generate_object(simple_object_size.value)
-        object_nodes = get_object_nodes(self.cluster, object_info.cid, object_info.oid, self.cluster.cluster_nodes[0])
-        return object_info, object_nodes
-
-    def _verify_object(self, storage_object: StorageObjectInfo, node: StorageNode):
-        with reporter.step(f"Verify object {storage_object.oid} from node {node}"):
-            file_path = get_object(
+    ) -> StorageObjectInfo:
+        count_object = request.param
+        object_sizes = [simple_object_size, complex_object_size]
+        object_list: list[StorageObjectInfo] = []
+        for cont in containers:
+            for _ in range(count_object):
+                object_list.append(cont.generate_object(size=random.choice(object_sizes).value))
+
+        yield object_list
+
+        for storage_object in object_list:
+            os.remove(storage_object.file_path)
+
+    @reporter.step("Select random node to stop and start it after test")
+    @pytest.fixture
+    def node_to_stop(
+        self, node_under_test: ClusterNode, cluster_state_controller: ClusterStateController
+    ) -> ClusterNode:
+        yield node_under_test
+        with reporter.step(f"start {node_under_test.storage_node}"):
+            cluster_state_controller.start_stopped_hosts()
+
+    @reporter.step("Upload object with nodes and compare")
+    def get_corrupted_objects_list(
+        self, nodes: list[StorageNode], storage_objects: list[StorageObjectInfo]
+    ) -> list[StorageObjectInfo]:
+        corrupted_objects = []
+        errors_get = []
+        for node in nodes:
+            for storage_object in storage_objects:
+                try:
+                    got_file_path = get_object(
                         storage_object.wallet,
                         storage_object.cid,
                         storage_object.oid,
@@ -117,79 +122,111 @@ class TestFailoverServer(ClusterTestBase):
                         shell=self.shell,
                         timeout="60s",
                     )
-            assert storage_object.file_hash == get_file_hash(file_path)
-
-    @reporter.step("Verify objects")
-    def verify_objects(self, nodes: list[StorageNode], storage_objects: list[StorageObjectInfo]) -> None:
-        workers_count = os.environ.get("PARALLEL_CUSTOM_LIMIT", 50)
-        with parallel_workers_limit(int(workers_count)):
-            parallel(self._verify_object, storage_objects * len(nodes), node=itertools.cycle(nodes))
+                    if storage_object.file_hash != get_file_hash(got_file_path):
+                        corrupted_objects.append(storage_object)
+                    os.remove(got_file_path)
+                except RuntimeError:
+                    errors_get.append(storage_object.oid)
+
+        assert len(errors_get) == 0, f"Get failed - {errors_get}"
+        return corrupted_objects
+
+    def check_objects_replication(
+        self, storage_objects: list[StorageObjectInfo], storage_nodes: list[StorageNode]
+    ) -> None:
+        for storage_object in storage_objects:
+            wait_object_replication(
+                storage_object.cid,
+                storage_object.oid,
+                2,
+                shell=self.shell,
+                nodes=storage_nodes,
+                sleep_interval=45,
+                attempts=60,
+            )
+
+    @pytest.fixture()
+    def object_and_nodes(
+        self, simple_object_size: ObjectSize, container: StorageContainer
+    ) -> tuple[StorageObjectInfo, list[ClusterNode]]:
+        object_info = container.generate_object(simple_object_size.value)
+        object_nodes = get_object_nodes(
+            cluster=self.cluster, cid=object_info.cid, oid=object_info.oid, alive_node=self.cluster.cluster_nodes[0]
+        )
+        return object_info, object_nodes
+
+    @pytest.fixture()
+    def up_stop_nodes(self, cluster_state_controller: ClusterStateController):
+        yield
+        cluster_state_controller.start_stopped_hosts()

     @allure.title("Full shutdown node")
     @pytest.mark.parametrize("containers, storage_objects", [(5, 10)], indirect=True)
     def test_complete_node_shutdown(
         self,
         storage_objects: list[StorageObjectInfo],
-        node_under_test: ClusterNode,
+        node_to_stop: ClusterNode,
         cluster_state_controller: ClusterStateController,
     ):
-        with reporter.step(f"Remove one node from the list of nodes"):
-            alive_nodes = list(set(self.cluster.cluster_nodes) - {node_under_test})
+        with reporter.step(f"Remove {node_to_stop} from the list of nodes"):
+            alive_nodes = list(set(self.cluster.cluster_nodes) - {node_to_stop})
             storage_nodes = [cluster.storage_node for cluster in alive_nodes]

-        with reporter.step("Tick 2 epochs and wait for 2 blocks"):
-            self.tick_epochs(2, storage_nodes[0], wait_block=2)
+        with reporter.step("Tick epoch and wait for 2 blocks"):
+            self.tick_epochs(1, storage_nodes[0], wait_block=2)

         with reporter.step(f"Stop node"):
-            cluster_state_controller.stop_node_host(node_under_test, "hard")
+            cluster_state_controller.stop_node_host(node=node_to_stop, mode="hard")

         with reporter.step("Verify that there are no corrupted objects"):
-            self.verify_objects(storage_nodes, storage_objects)
+            corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
+            assert not corrupted_objects_list

-        with reporter.step(f"Check node still in map"):
-            self.wait_node_in_map(node_under_test.storage_node, self.shell, alive_node=storage_nodes[0])
+        with reporter.step(f"check {node_to_stop.storage_node} in map"):
+            self.wait_node_in_map(node_to_stop.storage_node, self.shell, alive_node=storage_nodes[0])

         count_tick_epoch = int(alive_nodes[0].ir_node.get_netmap_cleaner_threshold()) + 4

         with reporter.step(f"Tick {count_tick_epoch} epochs and wait for 2 blocks"):
             self.tick_epochs(count_tick_epoch, storage_nodes[0], wait_block=2)

-        with reporter.step(f"Check node in not map after {count_tick_epoch} epochs"):
-            self.wait_node_not_in_map(node_under_test.storage_node, self.shell, alive_node=storage_nodes[0])
+        with reporter.step(f"Check {node_to_stop} in not map"):
+            self.wait_node_not_in_map(node_to_stop.storage_node, self.shell, alive_node=storage_nodes[0])

-        with reporter.step(f"Verify that there are no corrupted objects after {count_tick_epoch} epochs"):
-            self.verify_objects(storage_nodes, storage_objects)
+        with reporter.step(f"Verify that there are no corrupted objects after {count_tick_epoch} epoch"):
+            corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
+            assert not corrupted_objects_list

     @allure.title("Temporarily disable a node")
     @pytest.mark.parametrize("containers, storage_objects", [(5, 10)], indirect=True)
     def test_temporarily_disable_a_node(
         self,
         storage_objects: list[StorageObjectInfo],
-        node_under_test: ClusterNode,
+        node_to_stop: ClusterNode,
         cluster_state_controller: ClusterStateController,
     ):
-        with reporter.step(f"Remove one node from the list"):
-            storage_nodes = list(set(self.cluster.storage_nodes) - {node_under_test.storage_node})
+        with reporter.step(f"Remove {node_to_stop} from the list of nodes"):
+            storage_nodes = list(set(self.cluster.storage_nodes) - {node_to_stop.storage_node})

-        with reporter.step("Tick 2 epochs and wait for 2 blocks"):
-            self.tick_epochs(2, storage_nodes[0], wait_block=2)
+        with reporter.step("Tick epoch and wait for 2 blocks"):
+            self.tick_epochs(1, storage_nodes[0], wait_block=2)

         with reporter.step(f"Stop node"):
-            cluster_state_controller.stop_node_host(node_under_test, "hard")
+            cluster_state_controller.stop_node_host(node_to_stop, "hard")

         with reporter.step("Verify that there are no corrupted objects"):
-            self.verify_objects(storage_nodes, storage_objects)
+            corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
+            assert not corrupted_objects_list

-        with reporter.step(f"Check node still in map"):
-            self.wait_node_in_map(node_under_test.storage_node, self.shell, alive_node=storage_nodes[0])
+        with reporter.step(f"Check {node_to_stop} in map"):
+            self.wait_node_in_map(node_to_stop.storage_node, self.shell, alive_node=storage_nodes[0])

-        with reporter.step(f"Start node"):
-            cluster_state_controller.start_node_host(node_under_test)
+        cluster_state_controller.start_node_host(node_to_stop)

         with reporter.step("Verify that there are no corrupted objects"):
-            self.verify_objects(storage_nodes, storage_objects)
+            corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
+            assert not corrupted_objects_list

     @allure.title("Not enough nodes in the container with policy - 'REP 3 CBF 1 SELECT 4 FROM *'")
     def test_not_enough_nodes_in_container_rep_3(
@@ -198,9 +235,12 @@ class TestFailoverServer(ClusterTestBase):
         default_wallet: WalletInfo,
         cluster_state_controller: ClusterStateController,
         simple_file: str,
+        up_stop_nodes: None,
     ):
         object_info, object_nodes = object_and_nodes
-        endpoint_without_object = list(set(self.cluster.cluster_nodes) - set(object_nodes))[0].storage_node.get_rpc_endpoint()
+        endpoint_without_object = list(set(self.cluster.cluster_nodes) - set(object_nodes))[
+            0
+        ].storage_node.get_rpc_endpoint()
         endpoint_with_object = object_nodes[0].storage_node.get_rpc_endpoint()

         with reporter.step("Stop all nodes with object except first one"):
@@ -212,7 +252,7 @@ class TestFailoverServer(ClusterTestBase):
         with reporter.step(f"Get object from node with object"):
             get_object(default_wallet, object_info.cid, object_info.oid, self.shell, endpoint_with_object)

-        with reporter.step(f"[Negative] Put operation to node with object"):
+        with reporter.step(f"Put operation to node with object, expect error"):
             with pytest.raises(RuntimeError):
                 put_object(default_wallet, simple_file, object_info.cid, self.shell, endpoint_with_object)

@@ -222,6 +262,7 @@ class TestFailoverServer(ClusterTestBase):
         default_wallet: WalletInfo,
         cluster_state_controller: ClusterStateController,
         simple_file: str,
+        up_stop_nodes: None,
     ):
         with reporter.step("Create container with full network map"):
             node_count = len(self.cluster.cluster_nodes)
@@ -241,18 +282,19 @@ class TestFailoverServer(ClusterTestBase):
             object_nodes = get_object_nodes(self.cluster, cid, oid, self.cluster.cluster_nodes[0])

         with reporter.step("Choose node to stop"):
-            node_under_test = random.choice(object_nodes)
-            alive_node_with_object = random.choice(list(set(object_nodes) - {node_under_test}))
+            node_to_stop = random.choice(object_nodes)
+            alive_node_with_object = random.choice(list(set(object_nodes) - {node_to_stop}))
             alive_endpoint_with_object = alive_node_with_object.storage_node.get_rpc_endpoint()

         with reporter.step("Stop random node with object"):
-            cluster_state_controller.stop_node_host(node_under_test, "hard")
+            cluster_state_controller.stop_node_host(node_to_stop, "hard")

         with reporter.step("Put object to alive node with object"):
             oid_2 = put_object(default_wallet, simple_file, cid, self.shell, alive_endpoint_with_object)

         with reporter.step("Get object from alive node with object"):
-            get_object(default_wallet, cid, oid_2, self.shell, alive_endpoint_with_object)
+            get_file = get_object(default_wallet, cid, oid_2, self.shell, alive_endpoint_with_object)
+            os.remove(get_file)

         with reporter.step("Create container on alive node"):
             create_container(
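
Note: this file replaces the parallel `verify_objects()` helper with a sequential `get_corrupted_objects_list()` that downloads every object from every surviving node and compares hashes. The core of that check, reduced to the standard library (file paths stand in for objects fetched from a node; everything here is illustrative):

import hashlib

def file_hash(path: str) -> str:
    # stream the file so large objects do not load into memory at once
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

def corrupted(expected: dict[str, str]) -> list[str]:
    # expected maps a downloaded file path to the hash recorded at upload time
    return [path for path, want in expected.items() if file_hash(path) != want]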

View file

@@ -9,7 +9,6 @@ from frostfs_testlib import reporter
 from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
 from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
 from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
-from frostfs_testlib.s3.interfaces import BucketContainerResolver
 from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo, create_container
 from frostfs_testlib.steps.cli.object import get_object, put_object_to_random_node
 from frostfs_testlib.steps.node_management import (
@@ -34,8 +33,6 @@ from frostfs_testlib.utils.failover_utils import wait_object_replication
 from frostfs_testlib.utils.file_keeper import FileKeeper
 from frostfs_testlib.utils.file_utils import generate_file, get_file_hash

-from ...resources.common import S3_POLICY_FILE_LOCATION
-
 logger = logging.getLogger("NeoLogger")
 stopped_nodes: list[StorageNode] = []

@@ -48,6 +45,20 @@ def file_keeper():
     keeper.restore_files()

+@reporter.step("Return all stopped hosts")
+@pytest.fixture(scope="function", autouse=True)
+def after_run_return_all_stopped_hosts(cluster_state_controller: ClusterStateController) -> str:
+    yield
+    cluster_state_controller.start_stopped_hosts()
+
+
+@reporter.step("Return all stopped services after test")
+@pytest.fixture(scope="function")
+def after_run_return_all_stopped_services(cluster_state_controller: ClusterStateController):
+    yield
+    cluster_state_controller.start_all_stopped_services()
+
+
 @pytest.mark.failover
 @pytest.mark.failover_storage
 class TestFailoverStorage(ClusterTestBase):
@@ -97,7 +108,9 @@ class TestFailoverStorage(ClusterTestBase):
         )

         with reporter.step("Check object data is not corrupted"):
-            got_file_path = get_object(wallet, cid, oid, endpoint=replicated_nodes[0].get_rpc_endpoint(), shell=self.shell)
+            got_file_path = get_object(
+                wallet, cid, oid, endpoint=replicated_nodes[0].get_rpc_endpoint(), shell=self.shell
+            )
             assert get_file_hash(source_file_path) == get_file_hash(got_file_path)

         with reporter.step("Return all hosts"):
@@ -105,10 +118,11 @@ class TestFailoverStorage(ClusterTestBase):
         with reporter.step("Check object data is not corrupted"):
             replicated_nodes = wait_object_replication(cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes)
-            got_file_path = get_object(wallet, cid, oid, shell=self.shell, endpoint=replicated_nodes[0].get_rpc_endpoint())
+            got_file_path = get_object(
+                wallet, cid, oid, shell=self.shell, endpoint=replicated_nodes[0].get_rpc_endpoint()
+            )
             assert get_file_hash(source_file_path) == get_file_hash(got_file_path)

-    @pytest.mark.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], indirect=True)
     @allure.title("Do not ignore unhealthy tree endpoints (s3_client={s3_client})")
     def test_unhealthy_tree(
         self,
@@ -116,7 +130,7 @@ class TestFailoverStorage(ClusterTestBase):
         default_wallet: WalletInfo,
         simple_object_size: ObjectSize,
         cluster_state_controller: ClusterStateController,
-        bucket_container_resolver: BucketContainerResolver,
+        after_run_return_all_stopped_services,
     ):
         default_node = self.cluster.cluster_nodes[0]
@@ -134,7 +148,7 @@ class TestFailoverStorage(ClusterTestBase):
         with reporter.step("Create bucket with REP 1 SELECT 1 policy"):
             bucket = s3_client.create_bucket(
-                location_constraint="rep-1",
+                location_constraint="load-1-1",
             )

         file_path = generate_file(simple_object_size.value)
@@ -149,7 +163,6 @@ class TestFailoverStorage(ClusterTestBase):
                 wallet=default_wallet,
                 shell=self.shell,
                 endpoint=self.cluster.storage_nodes[0].get_rpc_endpoint(),
-                bucket_container_resolver=bucket_container_resolver,
             )[0]

         with reporter.step("Turn off all storage nodes except bucket node"):
@@ -210,6 +223,9 @@ class TestEmptyMap(ClusterTestBase):
         file_name = s3_helper.object_key_from_file_path(file_path)
         bucket_objects = [file_name]

+        objects_list = s3_client.list_objects(bucket)
+        assert not objects_list, f"Expected empty bucket, got {objects_list}"
+
         with reporter.step("Put object into bucket"):
             s3_client.put_object(bucket, file_path)

@@ -270,6 +286,9 @@ class TestEmptyMap(ClusterTestBase):
         file_name = s3_helper.object_key_from_file_path(file_path)
         bucket_objects = [file_name]

+        objects_list = s3_client.list_objects(bucket)
+        assert not objects_list, f"Expected empty bucket, got {objects_list}"
+
         with reporter.step("Put object into bucket"):
             s3_client.put_object(bucket, file_path)

@@ -280,7 +299,9 @@ class TestEmptyMap(ClusterTestBase):
             cluster_state_controller.stop_services_of_type(StorageNode)

         with reporter.step("Remove all nodes from network map"):
-            remove_nodes_from_map_morph(shell=self.shell, cluster=self.cluster, remove_nodes=self.cluster.services(StorageNode))
+            remove_nodes_from_map_morph(
+                shell=self.shell, cluster=self.cluster, remove_nodes=self.cluster.services(StorageNode)
+            )

         with reporter.step("Return all storage nodes to network map"):
             self.return_nodes_after_stop_with_check_empty_map(cluster_state_controller)

@@ -316,8 +337,8 @@ class TestEmptyMap(ClusterTestBase):
         s3_client: S3ClientWrapper,
         simple_object_size: ObjectSize,
         cluster_state_controller: ClusterStateController,
-        bucket: str,
     ):
+        bucket = s3_client.create_bucket()
         s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)

         file_path = generate_file(simple_object_size.value)

@@ -359,8 +380,9 @@ class TestEmptyMap(ClusterTestBase):
         s3_client: S3ClientWrapper,
         simple_object_size: ObjectSize,
         cluster_state_controller: ClusterStateController,
-        bucket: str,
     ):
+        bucket = s3_client.create_bucket()
+
         file_path = generate_file(simple_object_size.value)
         file_name = s3_helper.object_key_from_file_path(file_path)

@@ -400,8 +422,8 @@ class TestEmptyMap(ClusterTestBase):
         simple_object_size: ObjectSize,
         versioning_status: VersioningStatus,
         cluster_state_controller: ClusterStateController,
-        bucket: str,
     ):
+        bucket = s3_client.create_bucket()
         s3_helper.set_bucket_versioning(s3_client, bucket, versioning_status)

         file_path = generate_file(simple_object_size.value)

@@ -447,9 +469,14 @@ class TestStorageDataLoss(ClusterTestBase):
         simple_object_size: ObjectSize,
         complex_object_size: ObjectSize,
         cluster_state_controller: ClusterStateController,
+        after_run_return_all_stopped_services: str,
         file_keeper: FileKeeper,
-        bucket: str,
     ):
+        allure.dynamic.description(after_run_return_all_stopped_services)
+
+        with reporter.step("Create bucket"):
+            bucket = s3_client.create_bucket()
+
         with reporter.step("Put objects into bucket"):
             simple_object_path = generate_file(simple_object_size.value)
             simple_object_key = s3_helper.object_key_from_file_path(simple_object_path)
@@ -461,7 +488,9 @@ class TestStorageDataLoss(ClusterTestBase):
             s3_client.put_object(bucket, complex_object_path)

         with reporter.step("Check objects are in bucket"):
-            s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[simple_object_key, complex_object_key])
+            s3_helper.check_objects_in_bucket(
+                s3_client, bucket, expected_objects=[simple_object_key, complex_object_key]
+            )

         with reporter.step("Stop storage services on all nodes"):
             cluster_state_controller.stop_services_of_type(StorageNode)
@@ -509,8 +538,10 @@ class TestStorageDataLoss(ClusterTestBase):
         shards_watcher: ShardsWatcher,
         default_wallet: WalletInfo,
         test_start_time: datetime,
+        after_run_return_all_stopped_services: str,
     ):
         exception_messages = []
+        allure.dynamic.description(after_run_return_all_stopped_services)

         with reporter.step(f"Create container on node {node_under_test}"):
             locode = node_under_test.storage_node.get_un_locode()
@@ -575,18 +606,23 @@ class TestStorageDataLoss(ClusterTestBase):
                 exception_messages.append(f"Shard {shard} changed status to {status}")

         with reporter.step("No related errors should be in log"):
-            if node_under_test.host.is_message_in_logs(message_regex=r"\Wno such file or directory\W", since=test_start_time):
+            if node_under_test.host.is_message_in_logs(
+                message_regex=r"\Wno such file or directory\W", since=test_start_time
+            ):
                 exception_messages.append(f"Node {node_under_test} have shard errors in logs")

         with reporter.step("Pass test if no errors found"):
             assert not exception_messages, "\n".join(exception_messages)

-    @allure.title("Loss of one node should trigger use of tree and storage service in another node (s3_client={s3_client})")
+    @allure.title(
"Loss of one node should trigger use of tree and storage service in another node (s3_client={s3_client})"
)
def test_s3_one_endpoint_loss( def test_s3_one_endpoint_loss(
self, self,
bucket, bucket,
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
simple_object_size: ObjectSize, simple_object_size: ObjectSize,
after_run_return_all_stopped_services,
cluster_state_controller: ClusterStateController, cluster_state_controller: ClusterStateController,
): ):
# TODO: need to check that s3 gate is connected to localhost (such metric will be supported in 1.3) # TODO: need to check that s3 gate is connected to localhost (such metric will be supported in 1.3)
@ -603,7 +639,6 @@ class TestStorageDataLoss(ClusterTestBase):
put_object = s3_client.put_object(bucket, file_path) put_object = s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name]) s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
@pytest.mark.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], indirect=True)
@allure.title("After Pilorama.db loss on one node object is retrievable (s3_client={s3_client})") @allure.title("After Pilorama.db loss on one node object is retrievable (s3_client={s3_client})")
def test_s3_one_pilorama_loss( def test_s3_one_pilorama_loss(
self, self,
@ -612,7 +647,7 @@ class TestStorageDataLoss(ClusterTestBase):
cluster_state_controller: ClusterStateController, cluster_state_controller: ClusterStateController,
): ):
bucket = s3_client.create_bucket( bucket = s3_client.create_bucket(
location_constraint="rep3", location_constraint="load-1-4",
grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers", grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers",
) )
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED) s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
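Note: several tests in the hunks above stop calling s3_client.create_bucket() inline and instead take a bucket: str fixture argument. A minimal sketch of what such a fixture could look like, assuming the suite's s3_client fixture; the actual fixture is defined elsewhere in the repository:

import pytest

@pytest.fixture
def bucket(s3_client):
    # One fresh bucket per test; the wrapped S3 client returns the bucket name.
    yield s3_client.create_bucket()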


@@ -1,7 +1,8 @@
 import logging
+import os
 import random
 from time import sleep
-from typing import Callable, Optional, Tuple
+from typing import Optional, Tuple

 import allure
 import pytest

@@ -9,6 +10,7 @@ from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
 from frostfs_testlib.cli.netmap_parser import NetmapParser
 from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
+from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
 from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
 from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
 from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container

@@ -34,29 +36,31 @@ from frostfs_testlib.steps.node_management import (
     wait_for_node_to_be_ready,
 )
 from frostfs_testlib.steps.storage_policy import get_nodes_with_object
-from frostfs_testlib.storage.cluster import ClusterNode, StorageNode
+from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode
 from frostfs_testlib.storage.controllers import ClusterStateController
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.storage_object_info import NodeStatus
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
-from frostfs_testlib.utils import string_utils
+from frostfs_testlib.utils import datetime_utils, string_utils
 from frostfs_testlib.utils.failover_utils import wait_object_replication
 from frostfs_testlib.utils.file_utils import generate_file
-from ...helpers.utility import wait_for_gc_pass_on_storage_nodes
+from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes

 logger = logging.getLogger("NeoLogger")
 check_nodes: list[StorageNode] = []

-@allure.title("Add one node to cluster")
-@pytest.mark.add_nodes
 @pytest.mark.node_mgmt
+@pytest.mark.failover
+@pytest.mark.order(10)
 class TestNodeManagement(ClusterTestBase):
     @pytest.fixture
     @allure.title("Create container and pick the node with data")
-    def create_container_and_pick_node(self, default_wallet: WalletInfo, simple_object_size: ObjectSize) -> Tuple[str, StorageNode]:
+    def create_container_and_pick_node(
+        self, default_wallet: WalletInfo, simple_object_size: ObjectSize
+    ) -> Tuple[str, StorageNode]:
         file_path = generate_file(simple_object_size.value)
         placement_rule = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
         endpoint = self.cluster.default_rpc_endpoint

@@ -96,6 +100,11 @@ class TestNodeManagement(ClusterTestBase):
                 continue
             return

+    @pytest.fixture
+    def after_run_start_all_nodes(self):
+        yield
+        self.return_nodes()
+
     @pytest.fixture
     def return_nodes_after_test_run(self):
         yield

@@ -111,17 +120,18 @@ class TestNodeManagement(ClusterTestBase):
             # We need to wait for node to establish notifications from morph-chain
             # Otherwise it will hang up when we will try to set status
-            self.wait_for_blocks()
+            sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))

             with reporter.step(f"Move node {node} to online state"):
                 storage_node_set_status(node, status="online", retries=2)

             check_nodes.remove(node)
-            self.wait_for_blocks()
+            sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
             self.tick_epoch_with_retries(3, wait_block=2)
             check_node_in_map(node, shell=self.shell, alive_node=alive_node)

     @allure.title("Add one node to cluster")
+    @pytest.mark.add_nodes
     def test_add_nodes(
         self,
         default_wallet: WalletInfo,

@@ -138,7 +148,9 @@ class TestNodeManagement(ClusterTestBase):
         storage_nodes = self.cluster.storage_nodes
         random_node = random.choice(storage_nodes[1:])
-        alive_node = random.choice([storage_node for storage_node in storage_nodes if storage_node.id != random_node.id])
+        alive_node = random.choice(
+            [storage_node for storage_node in storage_nodes if storage_node.id != random_node.id]
+        )

         check_node_in_map(random_node, shell=self.shell, alive_node=alive_node)

@@ -198,6 +210,7 @@ class TestNodeManagement(ClusterTestBase):
         )
         wait_object_replication(cid, oid, 4, shell=self.shell, nodes=storage_nodes)

+    @pytest.mark.node_mgmt
     @allure.title("Drop object using control command")
     def test_drop_object(self, default_wallet, complex_object_size: ObjectSize, simple_object_size: ObjectSize):
         """

@@ -229,6 +242,7 @@ class TestNodeManagement(ClusterTestBase):
         self.wait_for_obj_dropped(wallet, cid, oid, endpoint, get_object)
         self.wait_for_obj_dropped(wallet, cid, oid, endpoint, head_object)

+    @pytest.mark.node_mgmt
     @pytest.mark.skip(reason="Need to clarify scenario")
     @allure.title("Control Operations with storage nodes")
     def test_shards(

@@ -271,6 +285,7 @@ class TestNodeManagement(ClusterTestBase):
         oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
         delete_object(wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)

+    @pytest.mark.node_mgmt
     @allure.title("Put object with stopped node")
     def test_stop_node(self, default_wallet, return_nodes_after_test_run, simple_object_size: ObjectSize):
         wallet = default_wallet

@@ -278,7 +293,9 @@ class TestNodeManagement(ClusterTestBase):
         source_file_path = generate_file(simple_object_size.value)
         storage_nodes = self.cluster.storage_nodes
         random_node = random.choice(storage_nodes[1:])
-        alive_node = random.choice([storage_node for storage_node in storage_nodes if storage_node.id != random_node.id])
+        alive_node = random.choice(
+            [storage_node for storage_node in storage_nodes if storage_node.id != random_node.id]
+        )

         cid = create_container(
             wallet,

@@ -301,7 +318,7 @@ class TestNodeManagement(ClusterTestBase):
             self.return_nodes(alive_node)

     @reporter.step("Wait for object to be dropped")
-    def wait_for_obj_dropped(self, wallet: str, cid: str, oid: str, endpoint: str, checker: Callable) -> None:
+    def wait_for_obj_dropped(self, wallet: str, cid: str, oid: str, endpoint: str, checker) -> None:
         for _ in range(3):
             try:
                 checker(wallet, cid, oid, shell=self.shell, endpoint=endpoint)

@@ -315,8 +332,6 @@ class TestNodeManagement(ClusterTestBase):
 @pytest.mark.maintenance
-@pytest.mark.failover
-@pytest.mark.order(9)
 class TestMaintenanceMode(ClusterTestBase):
     @pytest.fixture()
     @allure.title("Init Frostfs CLI remote")

@@ -333,6 +348,14 @@ class TestMaintenanceMode(ClusterTestBase):
         cli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path)
         return cli

+    @pytest.fixture()
+    @allure.title("Init Frostfs CLI remote")
+    def frostfs_cli(self, default_wallet: WalletInfo) -> FrostfsCli:
+        cli = FrostfsCli(
+            shell=self.shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=default_wallet.config_path
+        )
+        return cli
+
     @pytest.fixture()
     def restore_node_status(self, cluster_state_controller: ClusterStateController, default_wallet: WalletInfo):
         nodes_to_restore = []

@@ -342,15 +365,23 @@ class TestMaintenanceMode(ClusterTestBase):
         for node_to_restore in nodes_to_restore:
             cluster_state_controller.set_node_status(node_to_restore, default_wallet, NodeStatus.ONLINE)

+        self.tick_epoch(wait_block=2)
+
-    def check_node_status(self, expected_status: NodeStatus, node_under_test: ClusterNode, frostfs_cli: FrostfsCli, rpc_endpoint: str):
+    def check_node_status(
+        self, expected_status: NodeStatus, node_under_test: ClusterNode, frostfs_cli: FrostfsCli, rpc_endpoint: str
+    ):
         netmap = frostfs_cli.netmap.snapshot(rpc_endpoint).stdout
         all_snapshots = NetmapParser.snapshot_all_nodes(netmap)
         node_snapshot = [snapshot for snapshot in all_snapshots if node_under_test.host_ip == snapshot.node]
         if expected_status == NodeStatus.OFFLINE and not node_snapshot:
-            assert node_under_test.host_ip not in netmap, f"{node_under_test} status should be {expected_status}. See netmap:\n{netmap}"
+            assert (
+                node_under_test.host_ip not in netmap
+            ), f"{node_under_test} status should be {expected_status}. See netmap:\n{netmap}"
             return

-        assert node_snapshot, f"{node_under_test} status should be {expected_status}, but was not in netmap. See netmap:\n{netmap}"
+        assert (
+            node_snapshot
+        ), f"{node_under_test} status should be {expected_status}, but was not in netmap. See netmap:\n{netmap}"

         node_snapshot = node_snapshot[0]
         assert (
             expected_status == node_snapshot.node_status

@@ -381,6 +412,7 @@ class TestMaintenanceMode(ClusterTestBase):
         node_under_test = nodes_with_container[0]
         endpoint = node_under_test.storage_node.get_rpc_endpoint()
+        restore_node_status.append(node_under_test)

         file_path = generate_file(simple_object_size.value)
         oid = put_object(

@@ -390,9 +422,7 @@ class TestMaintenanceMode(ClusterTestBase):
             shell=self.shell,
             endpoint=endpoint,
         )

         with reporter.step("Set node status to 'maintenance'"):
-            restore_node_status.append(node_under_test)
             cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.MAINTENANCE)

         node_under_maintenance_error = "node is under maintenance"

@@ -424,6 +454,8 @@ class TestMaintenanceMode(ClusterTestBase):
         with pytest.raises(RuntimeError, match=node_under_maintenance_error):
             put_object(default_wallet, file_path, cid, self.shell, endpoint)

+        os.remove(file_path)
+
     @pytest.mark.sanity
     @allure.title("MAINTENANCE and OFFLINE mode transitions")
     def test_mode_transitions(

@@ -440,9 +472,15 @@ class TestMaintenanceMode(ClusterTestBase):
         alive_storage_node = alive_nodes[0].storage_node
         alive_rpc_endpoint = alive_storage_node.get_rpc_endpoint()

+        with reporter.step("Tick epoch"):
+            self.tick_epochs(2, alive_storage_node, 2)
+
         with reporter.step("Set node status to 'offline'"):
             cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.OFFLINE)

+        with reporter.step("Tick epoch to update the network map"):
+            self.tick_epochs(2, alive_storage_node, 2)
+
         with reporter.step("Check node status is 'offline' after update the network map"):
             self.check_node_status(NodeStatus.OFFLINE, node_under_test, frostfs_cli, alive_rpc_endpoint)

@@ -456,6 +494,9 @@ class TestMaintenanceMode(ClusterTestBase):
         with reporter.step("Check node status is 'online' after storage service restart"):
             self.check_node_status(NodeStatus.ONLINE, node_under_test, frostfs_cli, alive_rpc_endpoint)

+        with reporter.step("Tick 2 epochs"):
+            self.tick_epochs(2, alive_storage_node, 2)
+
         with reporter.step("Set node status to 'maintenance'"):
             cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.MAINTENANCE)

@@ -469,6 +510,9 @@ class TestMaintenanceMode(ClusterTestBase):
         with reporter.step("Check node staus is 'maintenance' after storage service restart"):
             self.check_node_status(NodeStatus.MAINTENANCE, node_under_test, frostfs_cli, alive_rpc_endpoint)

+        with reporter.step("Tick 2 epochs"):
+            self.tick_epochs(2, alive_storage_node, 2)
+
         with reporter.step("Set node status to 'offline'"):
             cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.OFFLINE)

@@ -481,12 +525,18 @@ class TestMaintenanceMode(ClusterTestBase):
         with reporter.step("Start storage service"):
             cluster_state_controller.start_storage_service(node_under_test)

+        with reporter.step("Check node status is 'offline' after storage service start"):
+            self.check_node_status(NodeStatus.OFFLINE, node_under_test, frostfs_cli, alive_rpc_endpoint)
+
         with reporter.step("Tick 2 epochs"):
             self.tick_epochs(2, alive_storage_node, 2)

         with reporter.step("Check node status is 'online' after storage service start"):
             self.check_node_status(NodeStatus.ONLINE, node_under_test, frostfs_cli, alive_rpc_endpoint)

+        with reporter.step("Tick 2 epochs"):
+            self.tick_epochs(2, alive_storage_node, 2)
+
         with reporter.step("Set node status to 'maintenance'"):
             cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.MAINTENANCE)

@@ -514,7 +564,7 @@ class TestMaintenanceMode(ClusterTestBase):
         cluster_state_controller: ClusterStateController,
         node_under_test: ClusterNode,
         frostfs_cli_remote: FrostfsCli,
-        default_wallet: WalletInfo,
+        frostfs_cli: FrostfsCli,
         restore_node_status: list[ClusterNode],
     ):
         restore_node_status.append(node_under_test)

@@ -531,4 +581,24 @@ class TestMaintenanceMode(ClusterTestBase):
             cluster_state_controller.set_maintenance_mode_allowed("true", node_under_test)

         with reporter.step("Set node status to 'maintenance'"):
-            cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.MAINTENANCE)
+            output = frostfs_cli_remote.control.set_status(endpoint=control_endpoint, status="maintenance")
+            assert "update request successfully sent" in output.stdout, f"Response = {output}"
+
+        with reporter.step("Tick epoch"):
+            self.tick_epoch(wait_block=2)
+
+        with reporter.step("Check node status is 'maintenance'"):
+            self.check_node_status(
+                NodeStatus.MAINTENANCE, node_under_test, frostfs_cli, node_under_test.storage_node.get_rpc_endpoint()
+            )
+
+        with reporter.step("Set node status to 'online'"):
+            frostfs_cli_remote.control.set_status(endpoint=control_endpoint, status="online")
+
+        with reporter.step("Tick epoch"):
+            self.tick_epoch()
+
+        with reporter.step("Check node status is 'online'"):
+            self.check_node_status(
+                NodeStatus.ONLINE, node_under_test, frostfs_cli, node_under_test.storage_node.get_rpc_endpoint()
+            )
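Note: in the hunks above, self.wait_for_blocks() is replaced by sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)). A rough standalone sketch of the conversion this relies on, assuming MORPH_BLOCK_TIME is a duration string such as "8s"; this is an illustrative stand-in, not the testlib implementation:

import re
import time

def parse_time(value: str) -> int:
    # Convert "8s" / "2m" / "1h" style duration strings to seconds (illustrative only).
    match = re.fullmatch(r"(\d+)\s*([smh]?)", value.strip())
    number, unit = int(match.group(1)), match.group(2)
    return number * {"": 1, "s": 1, "m": 60, "h": 3600}[unit]

MORPH_BLOCK_TIME = "8s"  # hypothetical value for this sketch
time.sleep(parse_time(MORPH_BLOCK_TIME))  # wait roughly one morph block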


@@ -1,63 +1,71 @@
 import math
+import re

 import allure
 import pytest
 from frostfs_testlib import reporter
-from frostfs_testlib.steps.cli.container import create_container, delete_container, search_nodes_with_container, wait_for_container_deletion
-from frostfs_testlib.steps.cli.object import delete_object, head_object, put_object_to_random_node
-from frostfs_testlib.steps.metrics import calc_metrics_count_from_stdout, check_metrics_counter, get_metrics_value
-from frostfs_testlib.steps.storage_policy import get_nodes_with_object
+from frostfs_testlib.steps.cli.container import create_container, delete_container
+from frostfs_testlib.steps.cli.object import delete_object, get_object_nodes, put_object_to_random_node
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
-from frostfs_testlib.testing.parallel import parallel
+from frostfs_testlib.testing.test_control import wait_for_success
 from frostfs_testlib.utils.file_utils import generate_file
-from ...helpers.utility import are_numbers_similar

-@pytest.mark.nightly
 @pytest.mark.container
 class TestContainerMetrics(ClusterTestBase):
-    @reporter.step("Put object to container: {cid}")
-    def put_object_parallel(self, file_path: str, wallet: WalletInfo, cid: str):
-        oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
-        return oid
-
-    @reporter.step("Get metrics value from node")
-    def get_metrics_search_by_greps_parallel(self, node: ClusterNode, **greps):
-        try:
-            content_stdout = node.metrics.storage.get_metrics_search_by_greps(greps)
-            return calc_metrics_count_from_stdout(content_stdout)
-        except Exception as e:
-            return None
+    @wait_for_success(interval=10)
+    def check_sum_counter_metrics_in_nodes(
+        self, cluster_nodes: list[ClusterNode], cid: str, phy_exp: int, logic_exp: int, user_exp: int
+    ):
+        counter_phy = 0
+        counter_logic = 0
+        counter_user = 0
+        for cluster_node in cluster_nodes:
+            metric_result = cluster_node.metrics.storage.get_metric_container(f"container_objects_total", cid)
+            counter_phy += self.get_count_metric_type_from_stdout(metric_result.stdout, "phy")
+            counter_logic += self.get_count_metric_type_from_stdout(metric_result.stdout, "logic")
+            counter_user += self.get_count_metric_type_from_stdout(metric_result.stdout, "user")
+
+        assert counter_phy == phy_exp, f"Expected metric Phy={phy_exp}, Actual: {counter_phy} in nodes: {cluster_nodes}"
+        assert (
+            counter_logic == logic_exp
+        ), f"Expected metric logic={logic_exp}, Actual: {counter_logic} in nodes: {cluster_nodes}"
+        assert (
+            counter_user == user_exp
+        ), f"Expected metric User={user_exp}, Actual: {counter_user} in nodes: {cluster_nodes}"
+
+    @staticmethod
+    def get_count_metric_type_from_stdout(metric_result_stdout: str, metric_type: str):
+        result = re.findall(rf'type="{metric_type}"}}\s(\d+)', metric_result_stdout)
+        return sum(map(int, result))

-    @allure.title("Container metrics (obj_size={object_size},policy={policy})")
-    @pytest.mark.parametrize("placement_policy, policy", [("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", "REP"), ("EC 1.1 CBF 1", "EC")])
+    @allure.title("Container metrics (obj_size={object_size})")
     def test_container_metrics(
-        self,
-        object_size: ObjectSize,
-        max_object_size: int,
-        default_wallet: WalletInfo,
-        cluster: Cluster,
-        placement_policy: str,
-        policy: str,
+        self, object_size: ObjectSize, max_object_size: int, default_wallet: WalletInfo, cluster: Cluster
     ):
         file_path = generate_file(object_size.value)
-        copies = 2 if policy == "REP" else 1
-        object_chunks = 1
+        placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
+        copies = 2
+        object_chunks = 0
+        head_object = 1
         link_object = 0

-        with reporter.step(f"Create container with policy {placement_policy}"):
-            cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)
-
         if object_size.value > max_object_size:
             object_chunks = math.ceil(object_size.value / max_object_size)
-            link_object = len(search_nodes_with_container(default_wallet, cid, self.shell, cluster.default_rpc_endpoint, cluster))
+            link_object = 1
+
+        with reporter.step(f"Create container with policy {placement_policy}"):
+            cid = create_container(
+                default_wallet,
+                rule=placement_policy,
+                shell=self.shell,
+                endpoint=self.cluster.default_rpc_endpoint,
+            )

         with reporter.step("Put object to random node"):
-            oid = put_object_to_random_node(
+            storage_object_id = put_object_to_random_node(
                 wallet=default_wallet,
                 path=file_path,
                 cid=cid,

@@ -65,150 +73,25 @@ class TestContainerMetrics(ClusterTestBase):
                 cluster=cluster,
             )

-        with reporter.step("Get object nodes"):
-            object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
-            object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
-
         with reporter.step("Check metric appears in node where the object is located"):
-            count_metrics = (object_chunks * copies) + link_object
-            if policy == "EC":
-                count_metrics = (object_chunks * 2) + link_object
-            check_metrics_counter(object_nodes, counter_exp=count_metrics, command="container_objects_total", cid=cid, type="phy")
-            check_metrics_counter(object_nodes, counter_exp=count_metrics, command="container_objects_total", cid=cid, type="logic")
-            check_metrics_counter(object_nodes, counter_exp=copies, command="container_objects_total", cid=cid, type="user")
+            object_nodes = get_object_nodes(
+                cluster=cluster, cid=cid, oid=storage_object_id, alive_node=cluster.cluster_nodes[0]
+            )
+            count_metrics_exp = (object_chunks + head_object + link_object) * copies
+            self.check_sum_counter_metrics_in_nodes(
+                object_nodes, cid, phy_exp=count_metrics_exp, logic_exp=count_metrics_exp, user_exp=copies
+            )

         with reporter.step("Delete file, wait until gc remove object"):
-            delete_object(default_wallet, cid, oid, self.shell, cluster.default_rpc_endpoint)
-
-        with reporter.step(f"Check container metrics 'the counter should equal {len(object_nodes)}' in object nodes"):
-            check_metrics_counter(object_nodes, counter_exp=len(object_nodes), command="container_objects_total", cid=cid, type="phy")
-            check_metrics_counter(object_nodes, counter_exp=len(object_nodes), command="container_objects_total", cid=cid, type="logic")
-            check_metrics_counter(object_nodes, counter_exp=0, command="container_objects_total", cid=cid, type="user")
+            delete_object(default_wallet, cid, storage_object_id, self.shell, self.cluster.default_rpc_endpoint)
+
+            count_metrics_exp = len(object_nodes)
+            self.check_sum_counter_metrics_in_nodes(
+                object_nodes, cid, phy_exp=count_metrics_exp, logic_exp=count_metrics_exp, user_exp=0
+            )

         with reporter.step("Check metrics(Phy, Logic, User) in each nodes"):
             # Phy and Logic metrics are 4, because in rule 'CBF 2 SELECT 2 FROM', cbf2*sel2=4
-            expect_metrics = 4 if policy == "REP" else 2
-            check_metrics_counter(cluster.cluster_nodes, counter_exp=expect_metrics, command="container_objects_total", cid=cid, type="phy")
-            check_metrics_counter(
-                cluster.cluster_nodes, counter_exp=expect_metrics, command="container_objects_total", cid=cid, type="logic"
-            )
-            check_metrics_counter(cluster.cluster_nodes, counter_exp=0, command="container_objects_total", cid=cid, type="user")
-
-    @allure.title("Container size metrics (obj_size={object_size},policy={policy})")
-    @pytest.mark.parametrize("placement_policy, policy", [("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", "REP"), ("EC 1.1 CBF 1", "EC")])
-    def test_container_size_metrics(
-        self,
-        object_size: ObjectSize,
-        default_wallet: WalletInfo,
-        placement_policy: str,
-        policy: str,
-    ):
-        file_path = generate_file(object_size.value)
-
-        with reporter.step(f"Create container with policy {policy}"):
-            cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, placement_policy)
-
-        with reporter.step("Put object to random node"):
-            oid = put_object_to_random_node(
-                wallet=default_wallet,
-                path=file_path,
-                cid=cid,
-                shell=self.shell,
-                cluster=self.cluster,
-            )
-
-        with reporter.step("Get object nodes"):
-            object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, self.cluster.storage_nodes)
-            object_nodes = [
-                cluster_node for cluster_node in self.cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes
-            ]
-
-        with reporter.step("Check metric appears in all node where the object is located"):
-            act_metric = sum(
-                [get_metrics_value(node, command="frostfs_node_engine_container_size_bytes", cid=cid) for node in object_nodes]
-            )
-            assert (act_metric // 2) == object_size.value
-
-        with reporter.step("Delete file, wait until gc remove object"):
-            id_tombstone = delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
-            tombstone = head_object(default_wallet, cid, id_tombstone, self.shell, self.cluster.default_rpc_endpoint)
-
-        with reporter.step(f"Check container size metrics"):
-            act_metric = get_metrics_value(object_nodes[0], command="frostfs_node_engine_container_size_bytes", cid=cid)
-            assert act_metric == int(tombstone["header"]["payloadLength"])
-
-    @allure.title("Container size metrics put {objects_count} objects (obj_size={object_size})")
-    @pytest.mark.parametrize("objects_count", [5, 10, 20])
-    def test_container_size_metrics_more_objects(
-        self,
-        object_size: ObjectSize,
-        default_wallet: WalletInfo,
-        objects_count: int
-    ):
-        with reporter.step(f"Create container"):
-            cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint)
-
-        with reporter.step(f"Put {objects_count} objects"):
-            files_path = [generate_file(object_size.value) for _ in range(objects_count)]
-            futures = parallel(self.put_object_parallel, files_path, wallet=default_wallet, cid=cid)
-            oids = [future.result() for future in futures]
-
-        with reporter.step("Check metric appears in all nodes"):
-            metric_values = [get_metrics_value(node, command="frostfs_node_engine_container_size_bytes", cid=cid) for node in self.cluster.cluster_nodes]
-            actual_value = sum(metric_values) // 2  # for policy REP 2, value divide by 2
-            expected_value = object_size.value * objects_count
-            assert are_numbers_similar(actual_value, expected_value, tolerance_percentage=2), "metric container size bytes value not correct"
-
-        with reporter.step("Delete file, wait until gc remove object"):
-            tombstones_size = 0
-            for oid in oids:
-                tombstone_id = delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
-                tombstone = head_object(default_wallet, cid, tombstone_id, self.shell, self.cluster.default_rpc_endpoint)
-                tombstones_size += int(tombstone["header"]["payloadLength"])
-
-        with reporter.step(f"Check container size metrics, 'should be positive in all nodes'"):
-            futures = parallel(get_metrics_value, self.cluster.cluster_nodes, command="frostfs_node_engine_container_size_bytes", cid=cid)
-            metrics_value_nodes = [future.result() for future in futures]
-            for act_metric in metrics_value_nodes:
-                assert act_metric >= 0, "Metrics value is negative"
-            assert sum(metrics_value_nodes) // len(self.cluster.cluster_nodes) == tombstones_size, "tomstone size of objects not correct"
-
-    @allure.title("Container metrics (policy={policy})")
-    @pytest.mark.parametrize("placement_policy, policy", [("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", "REP"), ("EC 1.1 CBF 1", "EC")])
-    def test_container_metrics_delete_complex_objects(
-        self,
-        complex_object_size: ObjectSize,
-        default_wallet: WalletInfo,
-        cluster: Cluster,
-        placement_policy: str,
-        policy: str
-    ):
-        copies = 2 if policy == "REP" else 1
-        objects_count = 2
-        metric_name = "frostfs_node_engine_container_objects_total"
-
-        with reporter.step(f"Create container"):
-            cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, rule=placement_policy)
-
-        with reporter.step(f"Put {objects_count} objects"):
-            files_path = [generate_file(complex_object_size.value) for _ in range(objects_count)]
-            futures = parallel(self.put_object_parallel, files_path, wallet=default_wallet, cid=cid)
-            oids = [future.result() for future in futures]
-
-        with reporter.step(f"Check metrics value in each nodes, should be {objects_count} for 'user'"):
-            check_metrics_counter(cluster.cluster_nodes, counter_exp=objects_count * copies, command=metric_name, cid=cid, type="user")
-
-        with reporter.step("Delete objects and container"):
-            for oid in oids:
-                delete_object(default_wallet, cid, oid, self.shell, cluster.default_rpc_endpoint)
-
-            delete_container(default_wallet, cid, self.shell, cluster.default_rpc_endpoint)
-
-        with reporter.step("Tick epoch and check container was deleted"):
-            self.tick_epoch()
-            wait_for_container_deletion(default_wallet, cid, shell=self.shell, endpoint=cluster.default_rpc_endpoint)
-
-        with reporter.step(f"Check metrics value in each nodes, should not be show any result"):
-            futures = parallel(self.get_metrics_search_by_greps_parallel, cluster.cluster_nodes, command=metric_name, cid=cid)
-            metrics_results = [future.result() for future in futures if future.result() is not None]
-            assert len(metrics_results) == 0, f"Metrics value is not empty in Prometheus, actual value in nodes: {metrics_results}"
+            self.check_sum_counter_metrics_in_nodes(cluster.cluster_nodes, cid, phy_exp=4, logic_exp=4, user_exp=0)
+
+        with reporter.step("Delete container"):
+            delete_container(default_wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
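Note: the new get_count_metric_type_from_stdout helper above sums counter values per type label from the node's metric output. A standalone sketch of the same regex pattern, with invented sample output and assuming the type label is the last label before the value:

import re

sample = (
    'container_objects_total{cid="abc",type="phy"} 3\n'
    'container_objects_total{cid="abc",type="logic"} 3\n'
    'container_objects_total{cid="abc",type="user"} 2\n'
)

def count_for_type(stdout: str, metric_type: str) -> int:
    # Capture the integer that follows type="<metric_type>"} and sum all matches.
    return sum(map(int, re.findall(rf'type="{metric_type}"}}\s(\d+)', stdout)))

assert count_for_type(sample, "phy") == 3
assert count_for_type(sample, "user") == 2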


@@ -1,114 +0,0 @@
import random
import re
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import delete_object, put_object, put_object_to_random_node
from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.file_utils import generate_file
@pytest.mark.nightly
class TestGarbageCollectorMetrics(ClusterTestBase):
@wait_for_success(interval=10)
def check_metrics_in_node(self, cluster_node: ClusterNode, counter_exp: int, **metrics_greps: str):
counter_act = 0
try:
metric_result = cluster_node.metrics.storage.get_metrics_search_by_greps(**metrics_greps)
counter_act += self.calc_metrics_count_from_stdout(metric_result.stdout)
except RuntimeError as e:
...
assert counter_act == counter_exp, f"Expected: {counter_exp}, Actual: {counter_act} in node: {cluster_node}"
@staticmethod
def calc_metrics_count_from_stdout(metric_result_stdout: str):
result = re.findall(r"}\s(\d+)", metric_result_stdout)
return sum(map(int, result))
@allure.title("Garbage collector expire_at object")
def test_garbage_collector_metrics_expire_at_object(self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster):
file_path = generate_file(simple_object_size.value)
placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
metrics_step = 1
with reporter.step("Get current garbage collector metrics for each nodes"):
metrics_counter = {}
for node in cluster.cluster_nodes:
metrics_counter[node] = get_metrics_value(node, command="frostfs_node_garbage_collector_marked_for_removal_objects_total")
with reporter.step(f"Create container with policy {placement_policy}"):
cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)
with reporter.step("Put object to random node with expire_at"):
current_epoch = self.get_epoch()
oid = put_object_to_random_node(
default_wallet,
file_path,
cid,
self.shell,
cluster,
expire_at=current_epoch + 1,
)
with reporter.step("Get object nodes"):
object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
with reporter.step("Tick Epoch"):
self.tick_epochs(epochs_to_tick=2, wait_block=2)
with reporter.step(f"Check garbage collector metrics 'the counter should increase by {metrics_step}' in object nodes"):
for node in object_nodes:
metrics_counter[node] += metrics_step
for node, counter in metrics_counter.items():
check_metrics_counter(
[node],
counter_exp=counter,
command="frostfs_node_garbage_collector_marked_for_removal_objects_total",
)
@allure.title("Garbage collector delete object")
def test_garbage_collector_metrics_deleted_objects(self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster):
file_path = generate_file(simple_object_size.value)
placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
metrics_step = 1
with reporter.step("Get current garbage collector metrics for each nodes"):
metrics_counter = {}
for node in cluster.cluster_nodes:
metrics_counter[node] = get_metrics_value(node, command="frostfs_node_garbage_collector_deleted_objects_total")
with reporter.step(f"Create container with policy {placement_policy}"):
cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)
with reporter.step("Put object to random node"):
oid = put_object_to_random_node(
default_wallet,
file_path,
cid,
self.shell,
cluster,
)
with reporter.step("Get object nodes"):
object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
with reporter.step("Delete file, wait until gc remove object"):
delete_object(default_wallet, cid, oid, self.shell, node.storage_node.get_rpc_endpoint())
with reporter.step(f"Check garbage collector metrics 'the counter should increase by {metrics_step}'"):
for node in object_nodes:
exp_metrics_counter = metrics_counter[node] + metrics_step
check_metrics_counter(
[node], counter_exp=exp_metrics_counter, command="frostfs_node_garbage_collector_deleted_objects_total"
)


@@ -1,207 +0,0 @@
import random
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.healthcheck.interfaces import Healthcheck
from frostfs_testlib.steps.cli.container import create_container, get_container, list_containers
from frostfs_testlib.steps.cli.object import get_object, head_object, put_object, search_object
from frostfs_testlib.steps.cli.tree import get_tree_list
from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
@pytest.mark.nightly
class TestGRPCMetrics(ClusterTestBase):
@pytest.fixture
def disable_policer(self, cluster_state_controller: ClusterStateController):
config_manager = cluster_state_controller.manager(ConfigStateManager)
config_manager.set_on_all_nodes(StorageNode, {"policer:unsafe_disable": "true"})
yield
cluster_state_controller.manager(ConfigStateManager).revert_all()
@allure.title("GRPC metrics container operations")
def test_grpc_metrics_container_operations(self, default_wallet: WalletInfo, cluster: Cluster):
placement_policy = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
with reporter.step("Select random node"):
node = random.choice(cluster.cluster_nodes)
with reporter.step("Get current gRPC metrics for method 'Put'"):
metrics_counter_put = get_metrics_value(node, command="grpc_server_handled_total", service="ContainerService", method="Put")
with reporter.step(f"Create container with policy {placement_policy}"):
cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)
with reporter.step(f"Check gRPC metrics method 'Put', 'the counter should increase by 1'"):
metrics_counter_put += 1
check_metrics_counter(
[node],
counter_exp=metrics_counter_put,
command="grpc_server_handled_total",
service="ContainerService",
method="Put",
)
with reporter.step("Get current gRPC metrics for method 'Get'"):
metrics_counter_get = get_metrics_value(node, command="grpc_server_handled_total", service="ContainerService", method="Get")
with reporter.step(f"Get container"):
get_container(default_wallet, cid, self.shell, node.storage_node.get_rpc_endpoint())
with reporter.step(f"Check gRPC metrics method=Get, 'the counter should increase by 1'"):
metrics_counter_get += 1
check_metrics_counter(
[node],
counter_exp=metrics_counter_get,
command="grpc_server_handled_total",
service="ContainerService",
method="Get",
)
with reporter.step("Get current gRPC metrics for method 'List'"):
metrics_counter_list = get_metrics_value(node, command="grpc_server_handled_total", service="ContainerService", method="List")
with reporter.step(f"Get container list"):
list_containers(default_wallet, self.shell, node.storage_node.get_rpc_endpoint())
with reporter.step(f"Check gRPC metrics method=List, 'the counter should increase by 1'"):
metrics_counter_list += 1
check_metrics_counter(
[node],
counter_exp=metrics_counter_list,
command="grpc_server_handled_total",
service="ContainerService",
method="List",
)
@allure.title("GRPC metrics object operations")
def test_grpc_metrics_object_operations(
self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, disable_policer
):
file_path = generate_file(simple_object_size.value)
placement_policy = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
with reporter.step("Select random node"):
node = random.choice(cluster.cluster_nodes)
with reporter.step(f"Create container with policy {placement_policy}"):
cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)
with reporter.step("Get current gRPC metrics for method 'Put'"):
metrics_counter_put = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Put")
with reporter.step("Put object to selected node"):
oid = put_object(default_wallet, file_path, cid, self.shell, node.storage_node.get_rpc_endpoint())
with reporter.step(f"Check gRPC metrics method 'Put', 'the counter should increase by 1'"):
metrics_counter_put += 1
check_metrics_counter(
[node],
counter_exp=metrics_counter_put,
command="grpc_server_handled_total",
service="ObjectService",
method="Put",
)
with reporter.step("Get current gRPC metrics for method 'Get'"):
metrics_counter_get = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Get")
with reporter.step(f"Get object"):
get_object(default_wallet, cid, oid, self.shell, node.storage_node.get_rpc_endpoint())
with reporter.step(f"Check gRPC metrics method=Get, 'the counter should increase by 1'"):
metrics_counter_get += 1
check_metrics_counter(
[node],
counter_exp=metrics_counter_get,
command="grpc_server_handled_total",
service="ObjectService",
method="Get",
)
with reporter.step("Get current gRPC metrics for method 'Search'"):
metrics_counter_search = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Search")
with reporter.step(f"Search object"):
search_object(default_wallet, cid, self.shell, node.storage_node.get_rpc_endpoint())
with reporter.step(f"Check gRPC metrics method=Search, 'the counter should increase by 1'"):
metrics_counter_search += 1
check_metrics_counter(
[node],
counter_exp=metrics_counter_search,
command="grpc_server_handled_total",
service="ObjectService",
method="Search",
)
with reporter.step("Get current gRPC metrics for method 'Head'"):
metrics_counter_head = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Head")
with reporter.step(f"Head object"):
head_object(default_wallet, cid, oid, self.shell, node.storage_node.get_rpc_endpoint())
with reporter.step(f"Check gRPC metrics method=Head, 'the counter should increase by 1'"):
metrics_counter_head += 1
check_metrics_counter(
[node],
counter_exp=metrics_counter_head,
command="grpc_server_handled_total",
service="ObjectService",
method="Head",
)
@allure.title("GRPC metrics Tree healthcheck")
def test_grpc_metrics_tree_service(self, cluster: Cluster, healthcheck: Healthcheck):
with reporter.step("Select random node"):
node = random.choice(cluster.cluster_nodes)
with reporter.step("Get current gRPC metrics for Healthcheck"):
metrics_counter = get_metrics_value(node, command="grpc_server_handled_total", service="TreeService", method="Healthcheck")
with reporter.step("Query Tree healthcheck status"):
healthcheck.tree_healthcheck(node)
with reporter.step(f"Check gRPC metrics for Healthcheck, 'the counter should increase'"):
check_metrics_counter(
[node],
">",
metrics_counter,
command="grpc_server_handled_total",
service="TreeService",
method="Healthcheck",
)
@allure.title("GRPC metrics Tree list")
def test_grpc_metrics_tree_list(self, default_wallet: WalletInfo, cluster: Cluster):
placement_policy = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
with reporter.step("Select random node"):
node = random.choice(cluster.cluster_nodes)
with reporter.step(f"Create container with policy {placement_policy}"):
cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)
with reporter.step("Get current gRPC metrics for Tree List"):
metrics_counter = get_metrics_value(node, command="grpc_server_handled_total", service="TreeService", method="TreeList")
with reporter.step("Query Tree List"):
get_tree_list(default_wallet, cid, self.shell, node.storage_node.get_rpc_endpoint())
with reporter.step(f"Check gRPC metrics for Tree List, 'the counter should increase by 1'"):
metrics_counter += 1
check_metrics_counter(
[node],
counter_exp=metrics_counter,
command="grpc_server_handled_total",
service="TreeService",
method="TreeList",
)


@@ -1,68 +0,0 @@
import random
import re
from datetime import datetime, timezone
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.metrics import get_metrics_value
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success
@pytest.mark.nightly
class TestLogsMetrics(ClusterTestBase):
@pytest.fixture
def revert_all(self, cluster_state_controller: ClusterStateController):
yield
cluster_state_controller.manager(ConfigStateManager).revert_all()
def restart_storage_service(self, cluster_state_controller: ClusterStateController) -> datetime:
config_manager = cluster_state_controller.manager(ConfigStateManager)
config_manager.csc.stop_services_of_type(StorageNode)
restart_time = datetime.now(timezone.utc)
config_manager.csc.start_services_of_type(StorageNode)
return restart_time
@wait_for_success(interval=10)
def check_metrics_in_node(self, cluster_node: ClusterNode, restart_time: datetime, log_priority: str = None, **metrics_greps):
current_time = datetime.now(timezone.utc)
counter_metrics = get_metrics_value(cluster_node, **metrics_greps)
counter_logs = self.get_count_logs_by_level(cluster_node, metrics_greps.get("level"), restart_time, current_time, log_priority)
assert counter_logs == counter_metrics, f"counter_logs: {counter_logs}, counter_metrics: {counter_metrics} in node: {cluster_node}"
@staticmethod
def get_count_logs_by_level(cluster_node: ClusterNode, log_level: str, after_time: datetime, until_time: datetime, log_priority: str):
count_logs = 0
try:
logs = cluster_node.host.get_filtered_logs(
log_level, unit="frostfs-storage", since=after_time, until=until_time, priority=log_priority
)
result = re.findall(rf":\s+{log_level}\s+", logs)
count_logs += len(result)
except RuntimeError as e:
...
return count_logs
@allure.title("Metrics for the log counter")
def test_log_counter_metrics(self, cluster_state_controller: ClusterStateController, revert_all):
restart_time = self.restart_storage_service(cluster_state_controller)
with reporter.step("Select random node"):
node = random.choice(self.cluster.cluster_nodes)
with reporter.step(f"Check metrics count logs with level 'info'"):
self.check_metrics_in_node(
node,
restart_time,
log_priority="6..6",
command="frostfs_node_logger_entry_count",
level="info",
dropped="false",
)
with reporter.step(f"Check metrics count logs with level 'error'"):
self.check_metrics_in_node(node, restart_time, command="frostfs_node_logger_entry_count", level="error", dropped="false")


@@ -1,294 +0,0 @@
import random
import re
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.cli.container import create_container, delete_container, search_nodes_with_container
from frostfs_testlib.steps.cli.object import delete_object, lock_object, put_object, put_object_to_random_node
from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
@pytest.mark.nightly
class TestObjectMetrics(ClusterTestBase):
@allure.title("Object metrics of removed container (obj_size={object_size})")
def test_object_metrics_removed_container(self, object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster):
file_path = generate_file(object_size.value)
placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
copies = 2
with reporter.step(f"Create container with policy {placement_policy}"):
cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)
with reporter.step("Put object to random node"):
oid = put_object_to_random_node(default_wallet, file_path, cid, self.shell, cluster)
with reporter.step("Check metric appears in node where the object is located"):
object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
check_metrics_counter(
object_nodes,
counter_exp=copies,
command="frostfs_node_engine_container_objects_total",
cid=cid,
type="user",
)
with reporter.step("Delete container"):
delete_container(default_wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
with reporter.step("Tick Epoch"):
self.tick_epochs(epochs_to_tick=2, wait_block=2)
with reporter.step("Check metrics of removed containers doesn't appear in the storage node"):
check_metrics_counter(object_nodes, counter_exp=0, command="frostfs_node_engine_container_objects_total", cid=cid, type="user")
check_metrics_counter(object_nodes, counter_exp=0, command="frostfs_node_engine_container_size_byte", cid=cid)
for node in object_nodes:
all_metrics = node.metrics.storage.get_metrics_search_by_greps(command="frostfs_node_engine_container_size_byte")
assert cid not in all_metrics.stdout, "metrics of removed containers shouldn't appear in the storage node"
@allure.title("Object metrics, locked object (obj_size={object_size}, policy={placement_policy})")
@pytest.mark.parametrize("placement_policy", ["REP 1 IN X CBF 1 SELECT 1 FROM * AS X", "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"])
def test_object_metrics_blocked_object(
self, object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, placement_policy: str
):
file_path = generate_file(object_size.value)
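        # "REP <n>" in the placement policy is the number of stored replicas, so every
        # put/delete below is expected to move the object counters by exactly n.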
metric_step = int(re.search(r"REP\s(\d+)", placement_policy).group(1))
with reporter.step(f"Create container with policy {placement_policy}"):
cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)
with reporter.step("Search container nodes"):
container_nodes = search_nodes_with_container(
wallet=default_wallet,
cid=cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
cluster=cluster,
)
with reporter.step("Get current metrics for metric_type=user"):
objects_metric_counter = 0
for node in container_nodes:
objects_metric_counter += get_metrics_value(node, command="frostfs_node_engine_objects_total", type="user")
with reporter.step("Put object to container node"):
oid = put_object(default_wallet, file_path, cid, self.shell, container_nodes[0].storage_node.get_rpc_endpoint())
with reporter.step(f"Check metric user 'the counter should increase by {metric_step}'"):
objects_metric_counter += metric_step
check_metrics_counter(
container_nodes,
counter_exp=objects_metric_counter,
command="frostfs_node_engine_objects_total",
type="user",
)
check_metrics_counter(
container_nodes,
counter_exp=metric_step,
command="frostfs_node_engine_container_objects_total",
cid=cid,
type="user",
)
with reporter.step("Delete object"):
delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
with reporter.step(f"Check metric user 'the counter should decrease by {metric_step}'"):
objects_metric_counter -= metric_step
check_metrics_counter(
container_nodes,
counter_exp=objects_metric_counter,
command="frostfs_node_engine_objects_total",
type="user",
)
check_metrics_counter(
container_nodes,
counter_exp=0,
command="frostfs_node_engine_container_objects_total",
cid=cid,
type="user",
)
with reporter.step("Put object and lock it to next epoch"):
oid = put_object(default_wallet, file_path, cid, self.shell, container_nodes[0].storage_node.get_rpc_endpoint())
current_epoch = self.get_epoch()
lock_object(
default_wallet,
cid,
oid,
self.shell,
container_nodes[0].storage_node.get_rpc_endpoint(),
expire_at=current_epoch + 1,
)
with reporter.step(f"Check metric user 'the counter should increase by {metric_step}'"):
objects_metric_counter += metric_step
check_metrics_counter(
container_nodes,
counter_exp=objects_metric_counter,
command="frostfs_node_engine_objects_total",
type="user",
)
check_metrics_counter(
container_nodes,
counter_exp=metric_step,
command="frostfs_node_engine_container_objects_total",
cid=cid,
type="user",
)
with reporter.step(f"Wait until remove locking 'the counter doesn't change'"):
self.tick_epochs(epochs_to_tick=2)
check_metrics_counter(
container_nodes,
counter_exp=objects_metric_counter,
command="frostfs_node_engine_objects_total",
type="user",
)
with reporter.step("Delete object"):
delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
with reporter.step(f"Check metric user 'the counter should decrease by {metric_step}'"):
objects_metric_counter -= metric_step
check_metrics_counter(
container_nodes,
counter_exp=objects_metric_counter,
command="frostfs_node_engine_objects_total",
type="user",
)
check_metrics_counter(
container_nodes,
counter_exp=0,
command="frostfs_node_engine_container_objects_total",
cid=cid,
type="user",
)
with reporter.step("Put object with expire_at"):
current_epoch = self.get_epoch()
oid = put_object(
default_wallet,
file_path,
cid,
self.shell,
container_nodes[0].storage_node.get_rpc_endpoint(),
expire_at=current_epoch + 1,
)
with reporter.step(f"Check metric user 'the counter should increase by {metric_step}'"):
objects_metric_counter += metric_step
check_metrics_counter(
container_nodes,
counter_exp=objects_metric_counter,
command="frostfs_node_engine_objects_total",
type="user",
)
check_metrics_counter(
container_nodes,
counter_exp=metric_step,
command="frostfs_node_engine_container_objects_total",
cid=cid,
type="user",
)
with reporter.step("Tick Epoch"):
self.tick_epochs(epochs_to_tick=2)
with reporter.step(f"Check metric user 'the counter should decrease by {metric_step}'"):
objects_metric_counter -= metric_step
check_metrics_counter(
container_nodes,
counter_exp=objects_metric_counter,
command="frostfs_node_engine_objects_total",
type="user",
)
check_metrics_counter(
container_nodes,
counter_exp=0,
command="frostfs_node_engine_container_objects_total",
cid=cid,
type="user",
)
@allure.title("Object metrics, stop the node (obj_size={object_size})")
def test_object_metrics_stop_node(
self,
object_size: ObjectSize,
default_wallet: WalletInfo,
cluster_state_controller: ClusterStateController,
):
placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
file_path = generate_file(object_size.value)
copies = 2
with reporter.step(f"Create container with policy {placement_policy}"):
cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, placement_policy)
with reporter.step(f"Check object metrics in container 'should be zero'"):
check_metrics_counter(
self.cluster.cluster_nodes,
counter_exp=0,
command="frostfs_node_engine_container_objects_total",
type="user",
cid=cid,
)
with reporter.step("Get current metrics for each nodes"):
            objects_metric_counter: dict[ClusterNode, int] = {}
for node in self.cluster.cluster_nodes:
objects_metric_counter[node] = get_metrics_value(node, command="frostfs_node_engine_objects_total", type="user")
with reporter.step("Put object"):
oid = put_object(default_wallet, file_path, cid, self.shell, self.cluster.default_rpc_endpoint)
with reporter.step("Get object nodes"):
object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, self.cluster.storage_nodes)
object_nodes = [
cluster_node for cluster_node in self.cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes
]
with reporter.step(f"Check metrics in object nodes 'the counter should increase by {copies}'"):
counter_exp = sum(objects_metric_counter[node] for node in object_nodes) + copies
check_metrics_counter(object_nodes, counter_exp=counter_exp, command="frostfs_node_engine_objects_total", type="user")
check_metrics_counter(
object_nodes,
counter_exp=copies,
command="frostfs_node_engine_container_objects_total",
type="user",
cid=cid,
)
with reporter.step(f"Select node to stop"):
node_to_stop = random.choice(object_nodes)
alive_nodes = set(object_nodes).difference({node_to_stop})
with reporter.step(f"Stop the node, wait until the object is replicated to another node"):
cluster_state_controller.stop_node_host(node_to_stop, "hard")
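            # Presumably the policer restores the lost replica on another container node,
            # so the expected total for the surviving nodes grows by one.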
objects_metric_counter[node_to_stop] += 1
with reporter.step(f"Check metric in alive nodes 'the counter should increase'"):
counter_exp = sum(objects_metric_counter[node] for node in alive_nodes)
check_metrics_counter(alive_nodes, ">=", counter_exp, command="frostfs_node_engine_objects_total", type="user")
with reporter.step("Start node"):
cluster_state_controller.start_node_host(node_to_stop)
with reporter.step(f"Check metric in restarted node, 'the counter doesn't change'"):
check_metrics_counter(
object_nodes,
counter_exp=copies,
command="frostfs_node_engine_container_objects_total",
type="user",
cid=cid,
)

View file

@@ -1,170 +0,0 @@
import random
import re
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.resources.wellknown_acl import EACL_PUBLIC_READ_WRITE
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import get_object, put_object
from frostfs_testlib.steps.metrics import check_metrics_counter
from frostfs_testlib.steps.node_management import node_shard_list, node_shard_set_mode
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers import ShardsWatcher
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing import parallel, wait_for_success
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
@pytest.mark.nightly
class TestShardMetrics(ClusterTestBase):
@pytest.fixture()
@allure.title("Get two shards for set mode")
def two_shards_and_node(self, cluster: Cluster) -> tuple[str, str, ClusterNode]:
node = random.choice(cluster.cluster_nodes)
shards = node_shard_list(node.storage_node)
two_shards = random.sample(shards, k=2)
yield two_shards[0], two_shards[1], node
for shard in two_shards:
node_shard_set_mode(node.storage_node, shard, "read-write")
node_shard_list(node.storage_node)
@pytest.fixture()
@allure.title("Revert all shards mode")
def revert_all_shards_mode(self):
yield
parallel(self.set_shard_rw_mode, self.cluster.cluster_nodes)
def set_shard_rw_mode(self, node: ClusterNode):
watcher = ShardsWatcher(node)
shards = watcher.get_shards()
for shard in shards:
watcher.set_shard_mode(shard["shard_id"], mode="read-write")
watcher.await_for_all_shards_status(status="read-write")
@staticmethod
def get_error_count_from_logs(cluster_node: ClusterNode, object_path: str, object_name: str):
error_count = 0
try:
logs = cluster_node.host.get_filtered_logs("error count", unit="frostfs-storage")
# search error logs for current object
for error_line in logs.split("\n"):
if object_path in error_line and object_name in error_line:
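                    # Shard errors are logged as '"error count": N'; sum every match on
                    # the line (format assumed from frostfs-storage log output).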
result = re.findall(r'"error\scount":\s(\d+)', error_line)
error_count += sum(map(int, result))
        except RuntimeError:
            # No logs available for the unit in this window; report zero errors.
            ...
return error_count
@staticmethod
@wait_for_success(180, 30)
def get_object_path_and_name_file(oid: str, cid: str, node: ClusterNode) -> tuple[str, str]:
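        # Assumed blobstor layout: objects nest under the first four characters of the
        # OID, i.e. <data_path>/data/<subdir>/data/<oid[0]>/<oid[1]>/<oid[2]>/<oid[3]>.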
oid_path = f"{oid[0]}/{oid[1]}/{oid[2]}/{oid[3]}"
object_path = None
with reporter.step("Search object file"):
node_shell = node.storage_node.host.get_shell()
data_path = node.storage_node.get_data_directory()
all_datas = node_shell.exec(f"ls -la {data_path}/data | awk '{{ print $9 }}'").stdout.strip()
for data_dir in all_datas.replace(".", "").strip().split("\n"):
check_dir = node_shell.exec(f" [ -d {data_path}/data/{data_dir}/data/{oid_path} ] && echo 1 || echo 0").stdout
if "1" in check_dir:
object_path = f"{data_path}/data/{data_dir}/data/{oid_path}"
object_name = f"{oid[4:]}.{cid}"
break
assert object_path is not None, f"{oid} object not found in directory - {data_path}/data"
return object_path, object_name
@allure.title("Metric for shard mode")
def test_shard_metrics_set_mode(self, two_shards_and_node: tuple[str, str, ClusterNode]):
metrics_counter = 1
shard1, shard2, node = two_shards_and_node
with reporter.step("Shard1 set to mode 'read-only'"):
node_shard_set_mode(node.storage_node, shard1, "read-only")
with reporter.step(f"Check shard metrics, 'the mode will change to 'READ_ONLY'"):
check_metrics_counter(
[node],
counter_exp=metrics_counter,
command="frostfs_node_engine_mode_info",
mode="READ_ONLY",
shard_id=shard1,
)
with reporter.step("Shard2 set to mode 'degraded-read-only'"):
node_shard_set_mode(node.storage_node, shard2, "degraded-read-only")
with reporter.step(f"Check shard metrics, 'the mode will change to 'DEGRADED_READ_ONLY'"):
check_metrics_counter(
[node],
counter_exp=metrics_counter,
command="frostfs_node_engine_mode_info",
mode="DEGRADED_READ_ONLY",
shard_id=shard2,
)
with reporter.step("Both shards set to mode 'read-write'"):
for shard in [shard1, shard2]:
node_shard_set_mode(node.storage_node, shard, "read-write")
with reporter.step(f"Check shard metrics, 'the mode will change to 'READ_WRITE'"):
for shard in [shard1, shard2]:
check_metrics_counter(
[node],
counter_exp=metrics_counter,
command="frostfs_node_engine_mode_info",
mode="READ_WRITE",
shard_id=shard,
)
@allure.title("Metric for error count on shard")
def test_shard_metrics_error_count(self, max_object_size: int, default_wallet: WalletInfo, cluster: Cluster, revert_all_shards_mode):
file_path = generate_file(round(max_object_size * 0.8))
with reporter.step(f"Create container"):
cid = create_container(
wallet=default_wallet,
shell=self.shell,
endpoint=cluster.default_rpc_endpoint,
rule="REP 1 CBF 1",
basic_acl=EACL_PUBLIC_READ_WRITE,
)
with reporter.step("Put object"):
oid = put_object(default_wallet, file_path, cid, self.shell, cluster.default_rpc_endpoint)
with reporter.step("Get object nodes"):
object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
node = random.choice(object_nodes)
with reporter.step("Search object in system."):
object_path, object_name = self.get_object_path_and_name_file(oid, cid, node)
with reporter.step("Block read file"):
node.host.get_shell().exec(f"chmod a-r {object_path}/{object_name}")
with reporter.step("Get object, expect error"):
with pytest.raises(RuntimeError, match=OBJECT_NOT_FOUND):
get_object(
wallet=default_wallet,
cid=cid,
oid=oid,
shell=self.shell,
endpoint=node.storage_node.get_rpc_endpoint(),
)
with reporter.step(f"Get shard error count from logs"):
counter = self.get_error_count_from_logs(node, object_path, object_name)
with reporter.step(f"Check shard error metrics"):
check_metrics_counter([node], counter_exp=counter, command="frostfs_node_engine_errors_total")

View file

@@ -10,24 +10,22 @@ from frostfs_testlib.resources.error_patterns import (
     INVALID_OFFSET_SPECIFIER,
     INVALID_RANGE_OVERFLOW,
     INVALID_RANGE_ZERO_LENGTH,
-    OBJECT_ALREADY_REMOVED,
     OUT_OF_RANGE,
 )
 from frostfs_testlib.shell import Shell
-from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container
+from frostfs_testlib.steps.cli.container import create_container
 from frostfs_testlib.steps.cli.object import (
     get_object_from_random_node,
     get_range,
     get_range_hash,
     head_object,
-    put_object,
     put_object_to_random_node,
     search_object,
 )
 from frostfs_testlib.steps.complex_object_actions import get_complex_object_split_ranges
-from frostfs_testlib.steps.storage_object import delete_object, delete_objects
+from frostfs_testlib.steps.storage_object import delete_objects
 from frostfs_testlib.steps.storage_policy import get_complex_object_copies, get_simple_object_copies
-from frostfs_testlib.storage.cluster import Cluster, ClusterNode
+from frostfs_testlib.storage.cluster import Cluster
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
 from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
@@ -54,7 +52,9 @@ RANGE_MAX_LEN = 500
 STATIC_RANGES = {}
-def generate_ranges(storage_object: StorageObjectInfo, max_object_size: int, shell: Shell, cluster: Cluster) -> list[(int, int)]:
+def generate_ranges(
+    storage_object: StorageObjectInfo, max_object_size: int, shell: Shell, cluster: Cluster
+) -> list[(int, int)]:
     file_range_step = storage_object.size / RANGES_COUNT
     file_ranges = []
@@ -89,35 +89,12 @@ def generate_ranges(storage_object: StorageObjectInfo, max_object_size: int, she
     return file_ranges_to_test
-@pytest.fixture(scope="module")
-def common_container(default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster) -> str:
-    rule = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
-    with reporter.step(f"Create container with {rule} and put object"):
-        cid = create_container(default_wallet, client_shell, cluster.default_rpc_endpoint, rule)
-    return cid
-@pytest.fixture(scope="module")
-def container_nodes(default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, common_container: str) -> list[ClusterNode]:
-    return search_nodes_with_container(default_wallet, common_container, client_shell, cluster.default_rpc_endpoint, cluster)
-@pytest.fixture(scope="module")
-def non_container_nodes(cluster: Cluster, container_nodes: list[ClusterNode]) -> list[ClusterNode]:
-    return list(set(cluster.cluster_nodes) - set(container_nodes))
 @pytest.fixture(
     # Scope session to upload/delete each files set only once
     scope="module"
 )
 def storage_objects(
-    default_wallet: WalletInfo,
-    client_shell: Shell,
-    cluster: Cluster,
-    object_size: ObjectSize,
-    placement_policy: PlacementPolicy,
+    default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, object_size: ObjectSize, placement_policy: PlacementPolicy
 ) -> list[StorageObjectInfo]:
     wallet = default_wallet
     # Separate containers for complex/simple objects to avoid side-effects
@@ -155,14 +132,6 @@ def storage_objects(
     delete_objects(storage_objects, client_shell, cluster)
-@pytest.fixture()
-def expected_object_copies(placement_policy: PlacementPolicy) -> int:
-    if placement_policy.name == "rep":
-        return 2
-    return 4
-@pytest.mark.nightly
 @pytest.mark.sanity
 @pytest.mark.grpc_api
 class TestObjectApi(ClusterTestBase):
@@ -171,7 +140,6 @@ class TestObjectApi(ClusterTestBase):
         self,
         storage_objects: list[StorageObjectInfo],
         simple_object_size: ObjectSize,
-        expected_object_copies: int,
     ):
         """
         Validate object storage policy
@@ -195,7 +163,7 @@ class TestObjectApi(ClusterTestBase):
             shell=self.shell,
             nodes=self.cluster.storage_nodes,
         )
-        assert copies == expected_object_copies, f"Expected {expected_object_copies} copies"
+        assert copies == 2, "Expected 2 copies"
     @allure.title("Get object by native API (obj_size={object_size}, policy={placement_policy})")
     def test_get_object_api(self, storage_objects: list[StorageObjectInfo]):
@@ -241,22 +209,6 @@
         )
         self.check_header_is_presented(head_info, storage_object_2.attributes)
-    @allure.title("Head deleted object with --raw arg (obj_size={object_size}, policy={placement_policy})")
-    def test_object_head_raw(self, default_wallet: str, object_size: ObjectSize, placement_policy: PlacementPolicy):
-        with reporter.step("Create container"):
-            cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, placement_policy.value)
-        with reporter.step("Upload object"):
-            file_path = generate_file(object_size.value)
-            oid = put_object_to_random_node(default_wallet, file_path, cid, self.shell, self.cluster)
-        with reporter.step("Delete object"):
-            delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
-        with reporter.step("Call object head --raw and expect error"):
-            with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED):
-                head_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint, is_raw=True)
     @allure.title("Search objects by native API (obj_size={object_size}, policy={placement_policy})")
     def test_search_object_api(self, storage_objects: list[StorageObjectInfo]):
         """
@@ -347,7 +299,9 @@ class TestObjectApi(ClusterTestBase):
             endpoint=self.cluster.default_rpc_endpoint,
         )["header"]
         object_type = header["objectType"]
-        assert object_type == "TOMBSTONE", f"Object wasn't deleted properly. Found object {tombstone_oid} with type {object_type}"
+        assert (
+            object_type == "TOMBSTONE"
+        ), f"Object wasn't deleted properly. Found object {tombstone_oid} with type {object_type}"
     @allure.title("Get range hash by native API (obj_size={object_size}, policy={placement_policy})")
     @pytest.mark.grpc_api
@@ -408,7 +362,8 @@ class TestObjectApi(ClusterTestBase):
                 range_cut=range_cut,
             )
             assert (
-                get_file_content(file_path, content_len=range_len, mode="rb", offset=range_start) == range_content
+                get_file_content(file_path, content_len=range_len, mode="rb", offset=range_start)
+                == range_content
             ), f"Expected range content to match {range_cut} slice of file payload"
     @allure.title("[NEGATIVE] Get invalid range by native API (obj_size={object_size}, policy={placement_policy})")
@@ -426,7 +381,9 @@ class TestObjectApi(ClusterTestBase):
         oids = [storage_object.oid for storage_object in storage_objects[:2]]
         file_size = storage_objects[0].size
-        assert RANGE_MIN_LEN < file_size, f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
+        assert (
+            RANGE_MIN_LEN < file_size
+        ), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
         file_ranges_to_test: list[tuple(int, int, str)] = [
             # Offset is bigger than the file size, the length is small.
@@ -471,7 +428,9 @@ class TestObjectApi(ClusterTestBase):
         oids = [storage_object.oid for storage_object in storage_objects[:2]]
         file_size = storage_objects[0].size
-        assert RANGE_MIN_LEN < file_size, f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
+        assert (
+            RANGE_MIN_LEN < file_size
+        ), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
         file_ranges_to_test: list[tuple(int, int, str)] = [
             # Offset is bigger than the file size, the length is small.
@@ -502,77 +461,9 @@
                 range_cut=range_cut,
             )
-    @allure.title("Get range from container and non-container nodes (object_size={object_size})")
-    def test_get_range_from_different_node(
-        self,
-        default_wallet: str,
-        common_container: str,
-        container_nodes: list[ClusterNode],
-        non_container_nodes: list[ClusterNode],
-        file_path: str,
-    ):
-        with reporter.step("Put object to container"):
-            container_node = random.choice(container_nodes)
-            oid = put_object(default_wallet, file_path, common_container, self.shell, container_node.storage_node.get_rpc_endpoint())
-        with reporter.step("Get range from container node endpoint"):
-            get_range(
-                default_wallet,
-                common_container,
-                oid,
-                "0:10",
-                self.shell,
-                container_node.storage_node.get_rpc_endpoint(),
-            )
-        with reporter.step("Get range from non-container node endpoint"):
-            non_container_node = random.choice(non_container_nodes)
-            get_range(
-                default_wallet,
-                common_container,
-                oid,
-                "0:10",
-                self.shell,
-                non_container_node.storage_node.get_rpc_endpoint(),
-            )
-    @allure.title("Get range hash from container and non-container nodes (object_size={object_size})")
-    def test_get_range_hash_from_different_node(
-        self,
-        default_wallet: str,
-        common_container: str,
-        container_nodes: list[ClusterNode],
-        non_container_nodes: list[ClusterNode],
-        file_path: str,
-    ):
-        with reporter.step("Put object to container"):
-            container_node = random.choice(container_nodes)
-            oid = put_object(default_wallet, file_path, common_container, self.shell, container_node.storage_node.get_rpc_endpoint())
-        with reporter.step("Get range hash from container node endpoint"):
-            get_range_hash(
-                default_wallet,
-                common_container,
-                oid,
-                "0:10",
-                self.shell,
-                container_node.storage_node.get_rpc_endpoint(),
-            )
-        with reporter.step("Get range hash from non-container node endpoint"):
-            non_container_node = random.choice(non_container_nodes)
-            get_range_hash(
-                default_wallet,
-                common_container,
-                oid,
-                "0:10",
-                self.shell,
-                non_container_node.storage_node.get_rpc_endpoint(),
-            )
     def check_header_is_presented(self, head_info: dict, object_header: dict) -> None:
         for key_to_check, val_to_check in object_header.items():
             assert key_to_check in head_info["header"]["attributes"], f"Key {key_to_check} is found in {head_object}"
-            assert head_info["header"]["attributes"].get(key_to_check) == str(val_to_check), f"Value {val_to_check} is equal"
+            assert head_info["header"]["attributes"].get(key_to_check) == str(
+                val_to_check
+            ), f"Value {val_to_check} is equal"

View file

@@ -1,9 +1,9 @@
 import allure
 import pytest
 from frostfs_testlib import reporter
-from frostfs_testlib.cli import FrostfsCli
-from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
+from frostfs_testlib.resources.wellknown_acl import EACL_PUBLIC_READ_WRITE
 from frostfs_testlib.shell import Shell
+from frostfs_testlib.steps.acl import form_bearertoken_file
 from frostfs_testlib.steps.cli.container import (
     REP_2_FOR_3_NODES_PLACEMENT_RULE,
     SINGLE_PLACEMENT_RULE,
@@ -12,59 +12,76 @@ from frostfs_testlib.steps.cli.container import (
     create_container,
 )
 from frostfs_testlib.steps.cli.object import delete_object, get_object
+from frostfs_testlib.steps.epoch import get_epoch
 from frostfs_testlib.steps.storage_object import StorageObjectInfo
 from frostfs_testlib.storage.cluster import Cluster
-from frostfs_testlib.storage.dataclasses import ape
+from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.test_control import expect_not_raises
 from pytest import FixtureRequest
-from ...helpers.bearer_token import create_bearer_token
-from ...helpers.container_access import assert_full_access_to_container
+@pytest.fixture(scope="module")
+@allure.title("Create bearer token for OTHERS with all operations allowed for all containers")
+def bearer_token_file_all_allow(default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster) -> str:
+    bearer = form_bearertoken_file(
+        default_wallet,
+        "",
+        [EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS) for op in EACLOperation],
+        shell=client_shell,
+        endpoint=cluster.default_rpc_endpoint,
+    )
+    return bearer
-@pytest.fixture(scope="session")
+@pytest.fixture(scope="module")
 @allure.title("Create user container for bearer token usage")
-def user_container(default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, request: FixtureRequest) -> StorageContainer:
-    rule = request.param if "param" in request.__dict__ else SINGLE_PLACEMENT_RULE
-    container_id = create_container(default_wallet, client_shell, cluster.default_rpc_endpoint, rule, PUBLIC_ACL)
+def user_container(
+    default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, request: FixtureRequest
+) -> StorageContainer:
+    container_id = create_container(
+        default_wallet,
+        shell=client_shell,
+        rule=request.param,
+        basic_acl=EACL_PUBLIC_READ_WRITE,
+        endpoint=cluster.default_rpc_endpoint,
+    )
     # Deliberately using s3gate wallet here to test bearer token
-    s3_gate_wallet = WalletInfo.from_node(cluster.s3_gates[0])
-    return StorageContainer(StorageContainerInfo(container_id, s3_gate_wallet), client_shell, cluster)
+    s3gate = cluster.s3_gates[0]
+    return StorageContainer(
+        StorageContainerInfo(container_id, WalletInfo.from_node(s3gate)),
+        client_shell,
+        cluster,
+    )
-@pytest.fixture(scope="session")
-@allure.title("Create bearer token with allowed put for container")
-def bearer_token(frostfs_cli: FrostfsCli, temp_directory: str, user_container: StorageContainer, cluster: Cluster) -> str:
-    rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL)
-    return create_bearer_token(frostfs_cli, temp_directory, user_container.get_id(), rule, cluster.default_rpc_endpoint)
 @pytest.fixture()
 def storage_objects(
     user_container: StorageContainer,
-    bearer_token: str,
+    bearer_token_file_all_allow: str,
     object_size: ObjectSize,
+    client_shell: Shell,
     cluster: Cluster,
 ) -> list[StorageObjectInfo]:
+    epoch = get_epoch(client_shell, cluster)
     storage_objects: list[StorageObjectInfo] = []
     for node in cluster.storage_nodes:
         storage_objects.append(
             user_container.generate_object(
                 object_size.value,
-                bearer_token=bearer_token,
+                epoch + 3,
+                bearer_token=bearer_token_file_all_allow,
                 endpoint=node.get_rpc_endpoint(),
             )
         )
     return storage_objects
-@pytest.mark.nightly
+@pytest.mark.smoke
 @pytest.mark.bearer
-@pytest.mark.ape
 class TestObjectApiWithBearerToken(ClusterTestBase):
     @allure.title("Object can be deleted from any node using s3gate wallet with bearer token (obj_size={object_size})")
     @pytest.mark.parametrize(
@@ -75,10 +92,10 @@ class TestObjectApiWithBearerToken(ClusterTestBase):
     def test_delete_object_with_s3_wallet_bearer(
         self,
         storage_objects: list[StorageObjectInfo],
-        bearer_token: str,
+        bearer_token_file_all_allow: str,
     ):
         s3_gate_wallet = WalletInfo.from_node(self.cluster.s3_gates[0])
-        with reporter.step("Delete each object from first storage node"):
+        with reporter.step("Try to delete each object from first storage node"):
             for storage_object in storage_objects:
                 with expect_not_raises():
                     delete_object(
@@ -87,7 +104,7 @@ class TestObjectApiWithBearerToken(ClusterTestBase):
                         storage_object.cid,
                        storage_object.oid,
                        self.shell,
                        endpoint=self.cluster.default_rpc_endpoint,
-                        bearer=bearer_token,
+                        bearer=bearer_token_file_all_allow,
                     )
     @allure.title("Object can be fetched from any node using s3gate wallet with bearer token (obj_size={object_size})")
@@ -100,17 +117,16 @@ class TestObjectApiWithBearerToken(ClusterTestBase):
         self,
         user_container: StorageContainer,
         object_size: ObjectSize,
-        bearer_token: str,
+        bearer_token_file_all_allow: str,
     ):
         s3_gate_wallet = WalletInfo.from_node(self.cluster.s3_gates[0])
-        with reporter.step("Put object to container"):
+        with reporter.step("Put one object to container"):
+            epoch = self.get_epoch()
             storage_object = user_container.generate_object(
-                object_size.value,
-                bearer_token=bearer_token,
-                endpoint=self.cluster.default_rpc_endpoint,
+                object_size.value, epoch + 3, bearer_token=bearer_token_file_all_allow
             )
-        with reporter.step("Get object from each storage node"):
+        with reporter.step("Try to fetch object from each storage node"):
             for node in self.cluster.storage_nodes:
                 with expect_not_raises():
                     get_object(
@@ -118,17 +134,6 @@ class TestObjectApiWithBearerToken(ClusterTestBase):
                         storage_object.cid,
                        storage_object.oid,
                        self.shell,
-                        node.get_rpc_endpoint(),
-                        bearer_token,
+                        endpoint=node.get_rpc_endpoint(),
+                        bearer=bearer_token_file_all_allow,
                     )
-    @allure.title("Wildcard APE rule contains all permissions (obj_size={object_size})")
-    def test_ape_wildcard_contains_all_rules(
-        self,
-        other_wallet: WalletInfo,
-        storage_objects: list[StorageObjectInfo],
-        bearer_token: str,
-    ):
-        obj = storage_objects.pop()
-        with reporter.step(f"Assert all operations available with object"):
-            assert_full_access_to_container(other_wallet, obj.cid, obj.oid, obj.file_path, self.shell, self.cluster, bearer_token)

View file

@@ -12,12 +12,11 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
-from ...helpers.utility import wait_for_gc_pass_on_storage_nodes
+from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
 logger = logging.getLogger("NeoLogger")
-@pytest.mark.nightly
 @pytest.mark.sanity
 @pytest.mark.grpc_api
 class TestObjectApiLifetime(ClusterTestBase):

View file

@@ -1,5 +1,6 @@
 import logging
 import re
+from datetime import datetime
 import allure
 import pytest
@@ -29,9 +30,9 @@ from frostfs_testlib.storage.dataclasses.storage_object_info import LockObjectIn
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.test_control import expect_not_raises, wait_for_success
-from frostfs_testlib.utils import datetime_utils, string_utils
-from ...helpers.utility import wait_for_gc_pass_on_storage_nodes
+from frostfs_testlib.utils import datetime_utils
+from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
 logger = logging.getLogger("NeoLogger")
@@ -42,7 +43,7 @@ FIXTURE_OBJECT_LIFETIME = 10
 @pytest.fixture(scope="module")
 def user_wallet(credentials_provider: CredentialsProvider, cluster: Cluster) -> WalletInfo:
     with reporter.step("Create user wallet with container"):
-        user = User(string_utils.unique_name("user-"))
+        user = User(f"user_{hex(int(datetime.now().timestamp() * 1000000))}")
         return credentials_provider.GRPC.provide(user, cluster.cluster_nodes[0])
@@ -66,7 +67,9 @@ def locked_storage_object(
     current_epoch = ensure_fresh_epoch(client_shell, cluster)
     expiration_epoch = current_epoch + FIXTURE_LOCK_LIFETIME
-    storage_object = user_container.generate_object(object_size.value, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME)
+    storage_object = user_container.generate_object(
+        object_size.value, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME
+    )
     lock_object_id = lock_object(
         storage_object.wallet,
         storage_object.cid,
@@ -75,7 +78,9 @@ def locked_storage_object(
         cluster.default_rpc_endpoint,
         lifetime=FIXTURE_LOCK_LIFETIME,
     )
-    storage_object.locks = [LockObjectInfo(storage_object.cid, lock_object_id, FIXTURE_LOCK_LIFETIME, expiration_epoch)]
+    storage_object.locks = [
+        LockObjectInfo(storage_object.cid, lock_object_id, FIXTURE_LOCK_LIFETIME, expiration_epoch)
+    ]
     yield storage_object
@@ -126,7 +131,6 @@ def verify_object_available(wallet: WalletInfo, cid: str, oid: str, shell: Shell
     )
-@pytest.mark.nightly
 @pytest.mark.grpc_object_lock
 class TestObjectLockWithGrpc(ClusterTestBase):
     @pytest.fixture()
@@ -138,7 +142,9 @@ class TestObjectLockWithGrpc(ClusterTestBase):
         with reporter.step("Creating locked object"):
             current_epoch = self.get_epoch()
-            storage_object = user_container.generate_object(object_size.value, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME)
+            storage_object = user_container.generate_object(
+                object_size.value, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME
+            )
             lock_object(
                 storage_object.wallet,
                 storage_object.cid,
@@ -214,7 +220,9 @@ class TestObjectLockWithGrpc(ClusterTestBase):
                 1,
             )
-    @allure.title("Lock must contain valid lifetime or expire_at field: (lifetime={wrong_lifetime}, expire-at={wrong_expire_at})")
+    @allure.title(
+        "Lock must contain valid lifetime or expire_at field: (lifetime={wrong_lifetime}, expire-at={wrong_expire_at})"
+    )
     # We operate with only lock object here so no complex object needed in this test
     @pytest.mark.parametrize("object_size", ["simple"], indirect=True)
     @pytest.mark.parametrize(
@@ -667,7 +675,9 @@ class TestObjectLockWithGrpc(ClusterTestBase):
         with reporter.step("Generate two objects"):
             for epoch_i in range(2):
-                storage_objects.append(user_container.generate_object(object_size.value, expire_at=current_epoch + epoch_i + 3))
+                storage_objects.append(
+                    user_container.generate_object(object_size.value, expire_at=current_epoch + epoch_i + 3)
+                )
             self.tick_epoch()

View file

@@ -1,415 +0,0 @@
import logging
import re
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
from frostfs_testlib.resources.error_patterns import OBJECT_IS_LOCKED
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL_F
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import TestFile, get_file_hash
logger = logging.getLogger("NeoLogger")
@pytest.mark.nightly
@pytest.mark.grpc_without_user
class TestObjectApiWithoutUser(ClusterTestBase):
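    # These helpers assume the frostfs-cli stdout layout: `object put` prints an
    # "ID: <oid>" line second from the end, and `object delete` prints the tombstone
    # ID on its second line.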
def _parse_oid(self, stdout: str) -> str:
id_str = stdout.strip().split("\n")[-2]
oid = id_str.split(":")[1]
return oid.strip()
def _parse_tombstone_oid(self, stdout: str) -> str:
id_str = stdout.split("\n")[1]
tombstone = id_str.split(":")[1]
return tombstone.strip()
@pytest.fixture(scope="function")
def public_container(self, default_wallet: WalletInfo) -> str:
with reporter.step("Create public container"):
cid_public = create_container(
default_wallet,
self.shell,
self.cluster.default_rpc_endpoint,
basic_acl=PUBLIC_ACL_F,
)
return cid_public
@pytest.fixture(scope="class")
def frostfs_cli(self, client_shell: Shell) -> FrostfsCli:
return FrostfsCli(client_shell, FROSTFS_CLI_EXEC)
@allure.title("Get public container by native API with generate private key")
def test_get_container_with_generated_key(self, frostfs_cli: FrostfsCli, public_container: str):
"""
Validate `container get` native API with flag `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Get container with generate key"):
with expect_not_raises():
frostfs_cli.container.get(rpc_endpoint, cid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
@allure.title("Get list containers by native API with generate private key")
def test_list_containers_with_generated_key(self, frostfs_cli: FrostfsCli, default_wallet: WalletInfo, public_container: str):
"""
Validate `container list` native API with flag `--generate-key`.
"""
rpc_endpoint = self.cluster.default_rpc_endpoint
owner = default_wallet.get_address_from_json(0)
with reporter.step("List containers with generate key"):
with expect_not_raises():
result = frostfs_cli.container.list(rpc_endpoint, owner=owner, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
with reporter.step("Expect container in received containers list"):
containers = result.stdout.split()
assert public_container in containers
@allure.title("Get list of public container objects by native API with generate private key")
def test_list_objects_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str):
"""
Validate `container list_objects` native API with flag `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("List objects with generate key"):
with expect_not_raises():
result = frostfs_cli.container.list_objects(rpc_endpoint, cid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
with reporter.step("Expect empty objects list"):
objects = result.stdout.split()
assert len(objects) == 0, objects
@allure.title("Search public container nodes by native API with generate private key")
def test_search_nodes_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str):
"""
Validate `container search_node` native API with flag `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Search nodes with generate key"):
with expect_not_raises():
frostfs_cli.container.search_node(rpc_endpoint, cid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
@allure.title("Put object into public container by native API with generate private key (obj_size={object_size})")
def test_put_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object put` into container with public ACL and flag `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Put object with generate key"):
with expect_not_raises():
result = frostfs_cli.object.put(
rpc_endpoint,
cid,
file_path,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = self._parse_oid(result.stdout)
with reporter.step("List objects with generate key"):
result = frostfs_cli.container.list_objects(rpc_endpoint, cid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
with reporter.step("Expect object in received objects list"):
objects = result.stdout.split()
assert oid in objects, objects
@allure.title("Get public container object by native API with generate private key (obj_size={object_size})")
def test_get_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object get` for container with public ACL and flag `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
expected_hash = get_file_hash(file_path)
with reporter.step("Put object with generate key"):
result = frostfs_cli.object.put(
rpc_endpoint,
cid,
file_path,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = self._parse_oid(result.stdout)
with reporter.step("Get object with generate key"):
with expect_not_raises():
frostfs_cli.object.get(
rpc_endpoint,
cid,
oid,
file=file_path,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
downloaded_hash = get_file_hash(file_path)
with reporter.step("Validate downloaded file"):
assert expected_hash == downloaded_hash
@allure.title("Head public container object by native API with generate private key (obj_size={object_size})")
def test_head_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object head` for container with public ACL and flag `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Put object with generate key"):
result = frostfs_cli.object.put(
rpc_endpoint,
cid,
file_path,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = self._parse_oid(result.stdout)
with reporter.step("Head object with generate key"):
with expect_not_raises():
frostfs_cli.object.head(rpc_endpoint, cid, oid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
@allure.title("Delete public container object by native API with generate private key (obj_size={object_size})")
def test_delete_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object delete` for container with public ACL and flag `--generate key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Put object with generate key"):
result = frostfs_cli.object.put(
rpc_endpoint,
cid,
file_path,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = self._parse_oid(result.stdout)
with reporter.step("Delete object with generate key"):
with expect_not_raises():
result = frostfs_cli.object.delete(rpc_endpoint, cid, oid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
oid = self._parse_tombstone_oid(result.stdout)
with reporter.step("Head object with generate key"):
result = frostfs_cli.object.head(
rpc_endpoint,
cid,
oid,
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
with reporter.step("Expect object type TOMBSTONE"):
object_type = re.search(r"(?<=type: )tombstone", result.stdout, re.IGNORECASE).group()
assert object_type == "TOMBSTONE", object_type
@allure.title("Lock public container object by native API with generate private key (obj_size={object_size})")
def test_lock_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object lock` for container with public ACL and flag `--generate-key`.
Attempt to delete the locked object.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Put object with generate key"):
result = frostfs_cli.object.put(
rpc_endpoint,
cid,
file_path,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = self._parse_oid(result.stdout)
with reporter.step("Lock object with generate key"):
with expect_not_raises():
frostfs_cli.object.lock(
rpc_endpoint,
cid,
oid,
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
lifetime=5,
)
with reporter.step("Delete locked object with generate key and expect error"):
with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
frostfs_cli.object.delete(
rpc_endpoint,
cid,
oid,
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
@allure.title("Search public container objects by native API with generate private key (obj_size={object_size})")
def test_search_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object search` for container with public ACL and flag `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Put object with generate key"):
result = frostfs_cli.object.put(
rpc_endpoint,
cid,
file_path,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = self._parse_oid(result.stdout)
with reporter.step("Object search with generate key"):
with expect_not_raises():
result = frostfs_cli.object.search(rpc_endpoint, cid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
with reporter.step("Expect object in received objects list of container"):
object_ids = re.findall(r"(\w{43,44})", result.stdout)
assert oid in object_ids
@allure.title("Get range of public container object by native API with generate private key (obj_size={object_size})")
def test_range_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object range` for container with public ACL and `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Put object with generate key"):
result = frostfs_cli.object.put(
rpc_endpoint,
cid,
file_path,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = self._parse_oid(result.stdout)
with reporter.step("Get range of object with generate key"):
with expect_not_raises():
frostfs_cli.object.range(
rpc_endpoint,
cid,
oid,
"0:10",
file=file_path,
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
@allure.title("Get hash of public container object by native API with generate private key (obj_size={object_size})")
def test_hash_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object hash` for container with public ACL and `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Put object with generate key"):
result = frostfs_cli.object.put(
rpc_endpoint,
cid,
file_path,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = self._parse_oid(result.stdout)
with reporter.step("Get range hash of object with generate key"):
with expect_not_raises():
frostfs_cli.object.hash(
rpc_endpoint,
cid,
oid,
range="0:10",
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
@allure.title("Get public container object nodes by native API with generate private key (obj_size={object_size})")
def test_nodes_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
"""
Validate `object nodes` for container with public ACL and `--generate-key`.
"""
cid = public_container
rpc_endpoint = self.cluster.default_rpc_endpoint
with reporter.step("Put object with generate key"):
result = frostfs_cli.object.put(
rpc_endpoint,
cid,
file_path,
no_progress=True,
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = self._parse_oid(result.stdout)
with reporter.step("Configure frostfs-cli for alive remote node"):
alive_node = self.cluster.cluster_nodes[0]
node_shell = alive_node.host.get_shell()
rpc_endpoint = alive_node.storage_node.get_rpc_endpoint()
node_frostfs_cli = FrostfsCli(node_shell, FROSTFS_CLI_EXEC)
with reporter.step("Get object nodes with generate key"):
with expect_not_raises():
node_frostfs_cli.object.nodes(
rpc_endpoint,
cid,
oid=oid,
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
)

View file

@@ -1,752 +0,0 @@
import json
import time
import allure
import pytest
import yaml
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
from frostfs_testlib.cli.netmap_parser import NetmapParser
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC
from frostfs_testlib.resources.common import COMPLEX_OBJECT_CHUNKS_COUNT, COMPLEX_OBJECT_TAIL_SIZE, HOSTING_CONFIG_FILE, MORPH_BLOCK_TIME
from frostfs_testlib.s3 import AwsCliClient, S3ClientWrapper
from frostfs_testlib.s3.interfaces import BucketContainerResolver, VersioningStatus
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode
from frostfs_testlib.storage.controllers import ClusterStateController
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk
from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import datetime_utils
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from ...resources.common import S3_POLICY_FILE_LOCATION
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
if "ec_policy" not in metafunc.fixturenames:
return
with open(HOSTING_CONFIG_FILE, "r") as file:
hosting_config = yaml.full_load(file)
node_count = len(hosting_config["hosts"])
ec_map = {
4: ["EC 1.1", "EC 2.1", "EC 3.1", "EC 2.2"],
8: ["EC 5.3", "EC 3.2", "EC 7.1", "EC 4.4", "EC 3.1"],
16: ["EC 12.4", "EC 8.4", "EC 5.3", "EC 4.4"],
100: ["EC 12.4", "EC 8.4", "EC 5.3", "EC 4.4"],
}
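    # Choose the largest configured bucket that does not exceed the real node count
    # (falling back to 4) and parametrize the test with that bucket's EC policies.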
nearest_node_count = ([4] + (list(filter(lambda x: x <= node_count, ec_map.keys()))))[-1]
metafunc.parametrize("ec_policy, node_count", ((ec_policy, node_count) for ec_policy in ec_map[nearest_node_count]))
@allure.title("Initialized remote FrostfsAdm")
@pytest.fixture
def frostfs_remote_adm(cluster: Cluster) -> FrostfsAdm:
node = cluster.cluster_nodes[0]
shell = node.host.get_shell()
return FrostfsAdm(shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH)
@pytest.mark.nightly
@pytest.mark.replication
@pytest.mark.ec_replication
class TestECReplication(ClusterTestBase):
def get_node_cli(self, cluster_node: ClusterNode, config: str) -> FrostfsCli:
shell = cluster_node.host.get_shell()
cli = FrostfsCli(shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=config)
self.cli_change_shards_mode: dict[FrostfsCli, str] = {cli: cluster_node.storage_node.get_control_endpoint()}
return cli
@pytest.fixture()
def restore_nodes_shards_mode(self):
yield
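# Teardown: switch all shards on the nodes registered via get_node_cli() back to read-write.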
for cli, endpoint in self.cli_change_shards_mode.items():
cli.shards.set_mode(endpoint, mode="read-write", all=True)
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
@pytest.fixture()
def rep_count(self, object_size: ObjectSize) -> int:
rep_count = 3
if object_size.name == "complex":
rep_count *= int(COMPLEX_OBJECT_CHUNKS_COUNT) + 1 if COMPLEX_OBJECT_TAIL_SIZE else int(COMPLEX_OBJECT_CHUNKS_COUNT)
return rep_count
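# wait_for_success(120, 5): re-run the check for up to 120 s, every 5 s,
# until the assertion passes (testlib retry decorator).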
@wait_for_success(120, 5)
def wait_replication(self, total_chunks: int, client: GrpcClientWrapper, cid: str, oid: str, success: bool = True) -> None:
if not success:
assert not self.check_replication(total_chunks, client, cid, oid)
else:
assert self.check_replication(total_chunks, client, cid, oid)
@allure.title("Restore chunk maximum params in network params ")
@pytest.fixture
def restore_network_config(self, frostfs_remote_adm: FrostfsAdm) -> None:
yield
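# Teardown: restore the devenv defaults for the EC shard limits.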
frostfs_remote_adm.morph.set_config(set_key_value='"MaxECDataCount=12" "MaxECParityCount=5"')
@reporter.step("Get object nodes output ")
def get_object_nodes(self, cli: FrostfsCli, cid: str, oid: str, endpoint: str = None) -> dict:
if not endpoint:
endpoint = self.cluster.default_rpc_endpoint
object_nodes = json.loads(cli.object.nodes(endpoint, cid, oid=oid, json=True, timeout=CLI_DEFAULT_TIMEOUT).stdout)
if object_nodes.get("errors"):
raise RuntimeError(object_nodes["errors"])
return object_nodes
@reporter.step("Get parity chunk ")
def get_parity_chunk_object(self, cli: FrostfsCli, cid: str, oid: str, endpoint: str = None) -> Chunk:
chunks = self.get_object_nodes(cli, cid, oid, endpoint)["data_objects"]
return Chunk(**chunks[-1])
@reporter.step("Get data chunk ")
def get_data_chunk_object(self, cli: FrostfsCli, cid: str, oid: str, endpoint: str = None) -> Chunk:
chunks = self.get_object_nodes(cli, cid, oid, endpoint)["data_objects"]
return Chunk(**chunks[0])
@reporter.step("Check replication chunks={total_chunks} chunks ")
def check_replication(self, total_chunks: int, client: GrpcClientWrapper, cid: str, oid: str) -> bool:
object_nodes_info = client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
return len(object_nodes_info) == total_chunks
@pytest.fixture()
def include_excluded_nodes(self, cluster_state_controller: ClusterStateController):
yield
cluster_state_controller.include_all_excluded_nodes()
@allure.title("Disable Policer on all nodes")
@pytest.fixture()
def disable_policer(self, cluster_state_controller: ClusterStateController) -> None:
with reporter.step("Disable policer for nodes"):
cluster_state_controller.manager(ConfigStateManager).set_on_all_nodes(
service_type=StorageNode, values={"policer": {"unsafe_disable": True}}
)
yield
with reporter.step("Enable policer for nodes"):
cluster_state_controller.start_stopped_hosts()
cluster_state_controller.manager(ConfigStateManager).revert_all()
@wait_for_success(300, 15)
@reporter.step("Check count nodes chunks")
def wait_sync_count_chunks_nodes(self, grpc_client: GrpcClientWrapper, cid: str, oid: str, count: int):
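# Replication is in sync when the total number of (chunk, node) placements
# reported by get_all() matches the expected count.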
all_chunks_after_include_node = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
chunks_nodes = [node for chunk in all_chunks_after_include_node for node in chunk.confirmed_nodes]
assert len(chunks_nodes) == count
@allure.title("Create container with EC policy (size={object_size})")
def test_create_container_with_ec_policy(self, object_size: ObjectSize, rep_count: int, grpc_client: GrpcClientWrapper) -> None:
test_file = generate_file(object_size.value)
with reporter.step("Create container."):
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
with reporter.step("Put object in container."):
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Check replication chunks."):
assert self.check_replication(rep_count, grpc_client, cid, oid)
@allure.title("Lose node with chunk data")
@pytest.mark.failover
def test_lose_node_with_data_chunk(
self,
grpc_client: GrpcClientWrapper,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
disable_policer: None,
) -> None:
with reporter.step("Create container."):
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 3.1", await_mode=True)
with reporter.step("Put object in container."):
test_file = generate_file(simple_object_size.value)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Check chunk replication on 4 nodes."):
assert self.check_replication(4, grpc_client, cid, oid)
with reporter.step("Search node data chunk"):
chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, cid, oid=oid)
chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunk)
with reporter.step("Stop node with data chunk."):
cluster_state_controller.stop_node_host(chunk_node[0], "hard")
with reporter.step("Get object"):
node = list(set(self.cluster.cluster_nodes) - {chunk_node[0]})[0]
grpc_client.object.get(cid, oid, node.storage_node.get_rpc_endpoint())
with reporter.step("Start stopped node, and check replication chunks."):
cluster_state_controller.start_node_host(chunk_node[0])
self.wait_replication(4, grpc_client, cid, oid)
@allure.title("Lose node with chunk parity")
@pytest.mark.failover
def test_lose_node_with_parity_chunk(
self,
grpc_client: GrpcClientWrapper,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
disable_policer: None,
) -> None:
with reporter.step("Create container."):
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 3.1", await_mode=True)
with reporter.step("Put object in container."):
test_file = generate_file(simple_object_size.value)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Check chunk replication on 4 nodes."):
assert self.check_replication(4, grpc_client, cid, oid)
with reporter.step("Search node with parity chunk"):
chunk = grpc_client.object.chunks.get_parity(self.cluster.default_rpc_endpoint, cid, oid=oid)
chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunk)[0]
with reporter.step("Stop node parity chunk."):
cluster_state_controller.stop_node_host(chunk_node, "hard")
with reporter.step("Get object, expect success."):
node = list(set(self.cluster.cluster_nodes) - {chunk_node})[0]
grpc_client.object.get(cid, oid, node.storage_node.get_rpc_endpoint())
with reporter.step("Start stoped node, and check replication chunks."):
cluster_state_controller.start_node_host(chunk_node)
self.wait_replication(4, grpc_client, cid, oid)
@allure.title("Lose nodes with chunk data and parity")
@pytest.mark.failover
def test_lose_nodes_data_chunk_and_parity(
self,
grpc_client: GrpcClientWrapper,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
disable_policer: None,
) -> None:
with reporter.step("Create container."):
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 3.1", await_mode=True)
with reporter.step("Put object in container."):
test_file = generate_file(simple_object_size.value)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Check count chunks, expect 4."):
assert self.check_replication(4, grpc_client, cid, oid)
with reporter.step("Search node data chunk and node parity chunk"):
data_chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, cid, oid=oid)
data_chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk)[0]
parity_chunk = grpc_client.object.chunks.get_parity(self.cluster.default_rpc_endpoint, cid, oid=oid)
parity_chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, parity_chunk)[0]
with reporter.step("Stop node with data chunk."):
cluster_state_controller.stop_node_host(data_chunk_node, "hard")
with reporter.step("Get object"):
node = list(set(self.cluster.cluster_nodes) - {data_chunk_node, parity_chunk_node})[0]
grpc_client.object.get(cid, oid, node.storage_node.get_rpc_endpoint())
with reporter.step("Start stopped host and check chunks."):
cluster_state_controller.start_node_host(data_chunk_node)
self.wait_replication(4, grpc_client, cid, oid)
with reporter.step("Stop node with parity chunk and one all node."):
cluster_state_controller.stop_node_host(data_chunk_node, "hard")
cluster_state_controller.stop_node_host(parity_chunk_node, "hard")
with reporter.step("Get object, expect error."):
with pytest.raises(RuntimeError):
grpc_client.object.get(cid, oid, node.storage_node.get_rpc_endpoint())
with reporter.step("Start stopped nodes and check replication chunk."):
cluster_state_controller.start_stopped_hosts()
self.wait_replication(4, grpc_client, cid, oid)
@allure.title("Policer work with chunk")
@pytest.mark.failover
def test_work_policer_with_nodes(
self,
simple_object_size: ObjectSize,
grpc_client: GrpcClientWrapper,
cluster_state_controller: ClusterStateController,
include_excluded_nodes: None,
) -> None:
with reporter.step("Create container."):
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
with reporter.step("Put object on container."):
test_file = generate_file(simple_object_size.value)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Check count chunks nodes on 3."):
assert self.check_replication(3, grpc_client, cid, oid)
with reporter.step("Search node with chunk."):
data_chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, cid, oid=oid)
node_data_chunk = grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk)[0]
first_all_chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
with reporter.step("Remove chunk node from network map"):
cluster_state_controller.remove_node_from_netmap([node_data_chunk.storage_node])
with reporter.step("Tick epoch."):
alive_node = list(set(self.cluster.cluster_nodes) - {node_data_chunk})[0]
self.tick_epoch(alive_node.storage_node, 2)
with reporter.step("Wait replication chunk with different node."):
node = grpc_client.object.chunks.search_node_without_chunks(
first_all_chunks, self.cluster, alive_node.storage_node.get_rpc_endpoint()
)[0]
self.wait_replication(3, grpc_client, cid, oid)
with reporter.step("Get new chunks"):
second_all_chunks = grpc_client.object.chunks.get_all(node.storage_node.get_rpc_endpoint(), cid, oid)
with reporter.step("Check that oid no change."):
assert [chunk for chunk in second_all_chunks if data_chunk.object_id == chunk.object_id]
with reporter.step("Include node in netmap"):
cluster_state_controller.include_node_to_netmap(node_data_chunk.storage_node, alive_node.storage_node)
self.wait_sync_count_chunks_nodes(grpc_client, cid, oid, 3)
@allure.title("EC X.Y combinations (nodes={node_count},policy={ec_policy},size={object_size})")
def test_create_container_with_difference_count_nodes(
self, node_count: int, ec_policy: str, object_size: ObjectSize, grpc_client: GrpcClientWrapper
) -> None:
with reporter.step("Create container."):
expected_chunks = int(ec_policy.split(" ")[1].split(".")[0]) + int(ec_policy.split(" ")[1].split(".")[1])
if "complex" in object_size.name:
expected_chunks *= 4
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy=ec_policy, await_mode=True)
with reporter.step("Put object in container."):
test_file = generate_file(object_size.value)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Check count object chunks."):
chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
assert len(chunks) == expected_chunks
with reporter.step("get object and check hash."):
file_with_node = grpc_client.object.get(cid, oid, self.cluster.default_rpc_endpoint)
assert get_file_hash(test_file) == get_file_hash(file_with_node)
@allure.title("Request PUT with copies_number flag")
def test_put_object_with_copies_number(self, grpc_client: GrpcClientWrapper, simple_object_size: ObjectSize) -> None:
with reporter.step("Create container."):
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
with reporter.step("Put object in container with copies number = 1"):
test_file = generate_file(simple_object_size.value)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint, copies_number=1)
with reporter.step("Check that count chunks > 1."):
chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
assert len(chunks) > 1
@allure.title("Request PUT and 1 node off")
@pytest.mark.failover
def test_put_object_with_off_cnr_node(
self, grpc_client: GrpcClientWrapper, cluster_state_controller: ClusterStateController, simple_object_size: ObjectSize
) -> None:
with reporter.step("Create container."):
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 3.1", await_mode=True)
with reporter.step("Stop one node in container nodes"):
cluster_state_controller.stop_node_host(self.cluster.cluster_nodes[1], "hard")
with reporter.step("Put object in container, expect success for EC container."):
test_file = generate_file(simple_object_size.value)
grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint, copies_number=1)
@allure.title("Request PUT (size={object_size})")
def test_put_object_with_ec_cnr(self, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
with reporter.step("Create container."):
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
with reporter.step("Put object in container"):
test_file = generate_file(object_size.value)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Get chunks object."):
chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
with reporter.step("Check header chunks object"):
for chunk in chunks:
chunk_head = grpc_client.object.head(
cid, chunk.object_id, self.cluster.default_rpc_endpoint, is_raw=True, json_output=False
).stdout
assert "EC header:" in chunk_head
@allure.title("Request GET (size={object_size})")
def test_get_object_in_ec_cnr(self, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
with reporter.step("Create container."):
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1 CBF 1", await_mode=True)
with reporter.step("Put object in container"):
test_file = generate_file(object_size.value)
hash_origin_file = get_file_hash(test_file)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Get id all chunks."):
chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
with reporter.step("Search chunk node and not chunks node."):
chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunks[0])[0]
not_chunk_node = grpc_client.object.chunks.search_node_without_chunks(chunks, self.cluster, self.cluster.default_rpc_endpoint)[
0
]
with reporter.step("GET request with chunk node, expect success"):
file_one = grpc_client.object.get(cid, oid, chunk_node.storage_node.get_rpc_endpoint())
hash_file_one = get_file_hash(file_one)
assert hash_file_one == hash_origin_file
with reporter.step("Get request with not chunk node"):
file_two = grpc_client.object.get(cid, oid, not_chunk_node.storage_node.get_rpc_endpoint())
hash_file_two = get_file_hash(file_two)
assert hash_file_two == hash_file_one == hash_origin_file
@allure.title("Request SEARCH with flags 'root' (size={object_size})")
def test_search_object_in_ec_cnr_root_flags(self, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
with reporter.step("Create container."):
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
with reporter.step("Put object in container"):
test_file = generate_file(object_size.value)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Search operation with --root flags"):
search_output = grpc_client.object.search(cid, self.cluster.default_rpc_endpoint, root=True)
assert search_output[0] == oid
@allure.title("Request SEARCH check valid chunk id (size={object_size})")
def test_search_object_in_ec_cnr_chunk_id(self, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
with reporter.step("Create container."):
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
with reporter.step("Put object in container"):
test_file = generate_file(object_size.value)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Search operation object"):
search_output = grpc_client.object.search(cid, self.cluster.default_rpc_endpoint)
chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
for chunk in chunks:
assert chunk.object_id in search_output
@allure.title("Request SEARCH check no chunk index info (size={object_size})")
def test_search_object_in_ec_cnr(self, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
with reporter.step("Create container."):
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
with reporter.step("Put object in container"):
test_file = generate_file(object_size.value)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Search operation all chunk"):
chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
for chunk in chunks:
chunk_search = grpc_client.object.search(cid, self.cluster.default_rpc_endpoint, oid=chunk.object_id)
assert "index" not in chunk_search
@allure.title("Request DELETE (size={object_size})")
@pytest.mark.failover
def test_delete_object_in_ec_cnr(
self, grpc_client: GrpcClientWrapper, object_size: ObjectSize, cluster_state_controller: ClusterStateController
) -> None:
with reporter.step("Create container."):
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
with reporter.step("Put object in container."):
test_file = generate_file(object_size.value)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Check object chunks nodes."):
chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
replication_count = 3 if object_size.name == "simple" else 3 * 4
assert len(chunks) == replication_count
with reporter.step("Delete object"):
grpc_client.object.delete(cid, oid, self.cluster.default_rpc_endpoint)
with reporter.step("Check that delete all chunks."):
for chunk in chunks:
with pytest.raises(RuntimeError, match="object already removed"):
grpc_client.object.head(cid, chunk.object_id, self.cluster.default_rpc_endpoint)
with reporter.step("Put second object."):
oid_second = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Check second object chunks nodes."):
chunks_second_object = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid_second)
assert len(chunks_second_object) == replication_count
with reporter.step("Stop nodes with chunk."):
chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunks_second_object[0])
cluster_state_controller.stop_node_host(chunk_node[0], "hard")
with reporter.step("Delete second object"):
cluster_nodes = list(set(self.cluster.cluster_nodes) - {chunk_node[0]})
grpc_client.object.delete(cid, oid_second, cluster_nodes[0].storage_node.get_rpc_endpoint())
with reporter.step("Check that delete all chunk second object."):
for chunk in chunks_second_object:
with pytest.raises(RuntimeError, match="object already removed|object not found"):
grpc_client.object.head(cid, chunk.object_id, cluster_nodes[0].storage_node.get_rpc_endpoint())
@allure.title("Request LOCK (size={object_size})")
@pytest.mark.failover
def test_lock_object_in_ec_cnr(
self,
grpc_client: GrpcClientWrapper,
frostfs_cli: FrostfsCli,
object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
include_excluded_nodes: None,
) -> None:
with reporter.step("Create container."):
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1", await_mode=True)
with reporter.step("Put object in container."):
test_file = generate_file(object_size.value)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Check object chunks nodes."):
chunks_object = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
replication_count = 3 if object_size.name == "simple" else 3 * 4
assert len(chunks_object) == replication_count
with reporter.step("Put LOCK in object."):
# TODO Rework for the grpc_client when the netmap methods are implemented
epoch = frostfs_cli.netmap.epoch(self.cluster.default_rpc_endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout.strip()
grpc_client.object.lock(cid, oid, self.cluster.default_rpc_endpoint, expire_at=(int(epoch) + 5))
with reporter.step("Check don`t delete chunk"):
for chunk in chunks_object:
with pytest.raises(RuntimeError, match="Lock EC chunk failed"):
grpc_client.object.delete(cid, chunk.object_id, self.cluster.default_rpc_endpoint)
with reporter.step("Check enable LOCK object"):
with pytest.raises(RuntimeError, match="object is locked"):
grpc_client.object.delete(cid, oid, self.cluster.default_rpc_endpoint)
with reporter.step("Remove node in netmap."):
chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunks_object[0])[0]
alive_node = list(set(self.cluster.cluster_nodes) - {chunk_node})[0]
cluster_state_controller.remove_node_from_netmap([chunk_node.storage_node])
with reporter.step("Check don`t delete chunk."):
for chunk in chunks_object:
with pytest.raises(RuntimeError, match="Lock EC chunk failed|object not found"):
grpc_client.object.delete(cid, chunk.object_id, alive_node.storage_node.get_rpc_endpoint())
with reporter.step("Check enable LOCK object"):
with pytest.raises(RuntimeError, match="object is locked"):
grpc_client.object.delete(cid, oid, alive_node.storage_node.get_rpc_endpoint())
with reporter.step("Include node in netmap"):
cluster_state_controller.include_node_to_netmap(chunk_node.storage_node, alive_node.storage_node)
@allure.title("Output MaxEC* params in frostf-scli (type={type_shards})")
@pytest.mark.parametrize("type_shards", ["Maximum count of data shards", "Maximum count of parity shards"])
def test_maxec_info_with_output_cli(self, frostfs_cli: FrostfsCli, type_shards: str) -> None:
with reporter.step("Get and check params"):
# TODO Rework for the grpc_client when the netmap methods are implemented
net_info = frostfs_cli.netmap.netinfo(self.cluster.default_rpc_endpoint).stdout
assert type_shards in net_info
@allure.title("Change MaxEC*Count params")
def test_change_max_data_shards_params(
self, frostfs_remote_adm: FrostfsAdm, frostfs_cli: FrostfsCli, restore_network_config: None
) -> None:
# TODO Rework for the grpc_client when the netmap methods are implemented
with reporter.step("Get now params MaxECDataCount and MaxECParityCount"):
node_netinfo = NetmapParser.netinfo(
frostfs_cli.netmap.netinfo(self.cluster.default_rpc_endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout
)
with reporter.step("Change params"):
frostfs_remote_adm.morph.set_config(set_key_value='"MaxECDataCount=5" "MaxECParityCount=3"')
with reporter.step("Get update params"):
update_net_info = NetmapParser.netinfo(
frostfs_cli.netmap.netinfo(self.cluster.default_rpc_endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout
)
with reporter.step("Check old and new params difference"):
assert (
update_net_info.maximum_count_of_data_shards != node_netinfo.maximum_count_of_data_shards
and update_net_info.maximum_count_of_parity_shards != node_netinfo.maximum_count_of_parity_shards
)
@allure.title("Check maximum count data and parity shards")
def test_change_over_max_parity_shards_params(self, frostfs_remote_adm: FrostfsAdm) -> None:
with reporter.step("Change over maximum params shards count."):
with pytest.raises(RuntimeError, match="MaxECDataCount and MaxECParityCount must be <= 256"):
frostfs_remote_adm.morph.set_config(set_key_value='"MaxECDataCount=130" "MaxECParityCount=130"')
@allure.title("Create container with EC policy and SELECT (SELECT={select})")
@pytest.mark.parametrize("select", [2, 4])
def test_create_container_with_select(self, select: int, grpc_client: GrpcClientWrapper) -> None:
with reporter.step("Create container"):
policy = f"EC 1.1 CBF 1 SELECT {select} FROM *"
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy=policy, await_mode=True)
with reporter.step("Check container nodes decomposed"):
container_nodes = grpc_client.container.nodes(self.cluster.default_rpc_endpoint, cid, self.cluster)
assert len(container_nodes) == select
@allure.title("Create container with EC policy and CBF (CBF={cbf})")
@pytest.mark.parametrize("cbf, expected_nodes", [(1, 2), (2, 4)])
def test_create_container_with_cbf(self, cbf: int, expected_nodes: int, grpc_client: GrpcClientWrapper) -> None:
with reporter.step("Create container."):
policy = f"EC 1.1 CBF {cbf}"
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy=policy, await_mode=True)
with reporter.step("Check expected container nodes."):
container_nodes = grpc_client.container.nodes(self.cluster.default_rpc_endpoint, cid, self.cluster)
assert len(container_nodes) == expected_nodes
@allure.title("Create container with EC policy and FILTER")
def test_create_container_with_filter(self, grpc_client: GrpcClientWrapper, simple_object_size: ObjectSize) -> None:
with reporter.step("Create Container."):
policy = "EC 1.1 IN RUS SELECT 2 FROM RU AS RUS FILTER Country EQ Russia AS RU"
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy=policy, await_mode=True)
with reporter.step("Put object in container."):
test_file = generate_file(simple_object_size.value)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Check object is decomposed exclusively on Russian nodes"):
data_chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, cid, oid=oid)
parity_chunk = grpc_client.object.chunks.get_parity(self.cluster.default_rpc_endpoint, cid, oid=oid)
node_data_chunk = grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk)
node_parity_chunk = grpc_client.object.chunks.get_chunk_node(self.cluster, parity_chunk)
for node in [node_data_chunk[1], node_parity_chunk[1]]:
assert "Russia" in node.country
@allure.title("Evacuation shard with chunk (type={type})")
@pytest.mark.parametrize("type, get_chunk", [("data", get_data_chunk_object), ("parity", get_parity_chunk_object)])
def test_evacuation_data_shard(
self,
restore_nodes_shards_mode: None,
frostfs_cli: FrostfsCli,
grpc_client: GrpcClientWrapper,
max_object_size: int,
type: str,
get_chunk,
) -> None:
with reporter.step("Create container."):
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 1.1 CBF 1", await_mode=True)
with reporter.step("Put object in container."):
test_file = generate_file(max_object_size - 1000)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Get object chunks."):
chunk = get_chunk(self, frostfs_cli, cid, oid, self.cluster.default_rpc_endpoint)
chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunk)
frostfs_node_cli = self.get_node_cli(chunk_node[0], config=chunk_node[0].storage_node.get_remote_wallet_config_path())
with reporter.step("Search shards chunk"):
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
shard_id = grpc_client.object.chunks.get_shard_chunk(chunk_node[0], chunk)
with reporter.step("Enable evacuation for shard"):
frostfs_node_cli.shards.set_mode(chunk_node[0].storage_node.get_control_endpoint(), mode="read-only", id=shard_id)
frostfs_node_cli.shards.evacuation_start(chunk_node[0].storage_node.get_control_endpoint(), shard_id, await_mode=True)
with reporter.step("Get object after evacuation shard"):
grpc_client.object.get(cid, oid, self.cluster.default_rpc_endpoint)
@allure.title("[NEGATIVE] Don`t create more 1 EC policy")
def test_more_one_ec_policy(self, grpc_client: GrpcClientWrapper) -> None:
with reporter.step("Create container with policy - 'EC 2.1 EC 1.1'"):
with pytest.raises(RuntimeError, match="can't parse placement policy"):
grpc_client.container.create(
self.cluster.default_rpc_endpoint, policy="EC 2.1 EC 1.1 CBF 1 SELECT 4 FROM *", await_mode=True
)
@allure.title("Create bucket with EC policy (s3_client={s3_client})")
@pytest.mark.parametrize("s3_policy, s3_client", [(S3_POLICY_FILE_LOCATION, AwsCliClient)], indirect=True)
def test_create_bucket_with_ec_location(
self, s3_client: S3ClientWrapper, bucket_container_resolver: BucketContainerResolver, grpc_client: GrpcClientWrapper
) -> None:
with reporter.step("Create bucket with EC location constrain"):
bucket = s3_client.create_bucket(location_constraint="ec3.1")
with reporter.step("Resolve container bucket"):
cid = bucket_container_resolver.resolve(self.cluster.cluster_nodes[0], bucket)
with reporter.step("Validate container policy"):
container = grpc_client.container.get(self.cluster.default_rpc_endpoint, cid, json_mode=True, timeout=CLI_DEFAULT_TIMEOUT)
assert container
@allure.title("Bucket object count chunks (s3_client={s3_client}, size={object_size})")
@pytest.mark.parametrize("s3_policy, s3_client", [(S3_POLICY_FILE_LOCATION, AwsCliClient)], indirect=True)
def test_count_chunks_bucket_with_ec_location(
self,
s3_client: S3ClientWrapper,
bucket_container_resolver: BucketContainerResolver,
grpc_client: GrpcClientWrapper,
object_size: ObjectSize,
) -> None:
with reporter.step("Create bucket with EC location constrain"):
bucket = s3_client.create_bucket(location_constraint="ec3.1")
with reporter.step("Enable versioning object"):
s3_client.put_bucket_versioning(bucket, VersioningStatus.ENABLED)
bucket_status = s3_client.get_bucket_versioning_status(bucket)
assert bucket_status == VersioningStatus.ENABLED.value
with reporter.step("Put object in bucket"):
test_file = generate_file(object_size.value)
bucket_object = s3_client.put_object(bucket, test_file)
with reporter.step("Watch replication count chunks"):
cid = bucket_container_resolver.resolve(self.cluster.cluster_nodes[0], bucket)
chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, bucket_object)
expect_chunks = 4 if object_size.name == "simple" else 16
assert len(chunks) == expect_chunks
@allure.title("Replication chunk after drop (size={object_size})")
def test_drop_chunk_and_replication(self, grpc_client: GrpcClientWrapper, object_size: ObjectSize, rep_count: int) -> None:
with reporter.step("Create container"):
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy="EC 2.1 CBF 1", await_mode=True)
with reporter.step("Put object"):
test_file = generate_file(object_size.value)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Get all chunks"):
data_chunk = grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, cid, oid=oid)
with reporter.step("Search chunk node"):
chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk)
shell_chunk_node = chunk_node[0].host.get_shell()
with reporter.step("Get replication count"):
assert self.check_replication(rep_count, grpc_client, cid, oid)
with reporter.step("Delete chunk"):
frostfs_node_cli = FrostfsCli(
shell_chunk_node,
frostfs_cli_exec_path=FROSTFS_CLI_EXEC,
config_file=chunk_node[0].storage_node.get_remote_wallet_config_path(),
)
frostfs_node_cli.control.drop_objects(chunk_node[0].storage_node.get_control_endpoint(), f"{cid}/{data_chunk.object_id}")
with reporter.step("Wait replication count after drop one chunk"):
self.wait_replication(rep_count, grpc_client, cid, oid)
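
Taken together, the failover tests above pin down the EC invariant this suite relies on: an object stored with policy "EC X.Y" is split into X data and Y parity chunks per part, and stays readable while at most Y chunk-holding nodes are lost. A minimal restatement of that arithmetic (illustrative only, not part of the suite):

def ec_counts(ec_policy: str) -> tuple[int, int]:
    # "EC 3.1" -> (3, 1): three data chunks, one parity chunk per stored part.
    data, parity = ec_policy.split(" ")[1].split(".")
    return int(data), int(parity)

def survives_node_loss(ec_policy: str, lost_chunks: int) -> bool:
    # Any Y lost chunks are recoverable from the remaining X, so GET succeeds
    # while the number of lost chunks does not exceed the parity count.
    return lost_chunks <= ec_counts(ec_policy)[1]

assert survives_node_loss("EC 3.1", 1)      # one node down: GET still works
assert not survives_node_loss("EC 3.1", 2)  # data + parity nodes down: GET fails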

View file

@@ -5,8 +5,8 @@ import allure
 import pytest
 from frostfs_testlib import reporter
 from frostfs_testlib.shell import Shell
-from frostfs_testlib.steps.cli.container import create_container
-from frostfs_testlib.steps.cli.object import head_object, put_object
+from frostfs_testlib.steps.cli.container import create_container, delete_container
+from frostfs_testlib.steps.cli.object import delete_object, head_object, put_object
 from frostfs_testlib.storage.cluster import Cluster
 from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
@@ -25,6 +25,11 @@ WAIT_FOR_REPLICATION = 60
 @pytest.mark.failover
 @pytest.mark.replication
 class TestReplication(ClusterTestBase):
+    @pytest.fixture(autouse=True)
+    def start_stopped_nodes_after_test(self, cluster_state_controller: ClusterStateController):
+        yield
+        cluster_state_controller.start_stopped_hosts()
+
     @allure.title("Replication (obj_size={object_size})")
     def test_replication(
         self,

View file

@@ -3,56 +3,85 @@ import logging
 import allure
 import pytest
 from frostfs_testlib import reporter
-from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
 from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
-from frostfs_testlib.steps.acl import bearer_token_base64_from_file
+from frostfs_testlib.steps.acl import (
+    bearer_token_base64_from_file,
+    create_eacl,
+    form_bearertoken_file,
+    set_eacl,
+    sign_bearer,
+    wait_for_cache_expired,
+)
 from frostfs_testlib.steps.cli.container import create_container
 from frostfs_testlib.steps.http.http_gate import upload_via_http_gate_curl, verify_object_hash
-from frostfs_testlib.storage.cluster import Cluster
-from frostfs_testlib.storage.dataclasses import ape
+from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
-from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils.file_utils import generate_file
-from ....helpers.bearer_token import create_bearer_token

 logger = logging.getLogger("NeoLogger")

+@pytest.mark.skip("Skipped temporarily")
 @pytest.mark.http_gate
 @pytest.mark.http_put
 class Test_http_bearer(ClusterTestBase):
     PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"

-    @pytest.fixture(scope="class")
-    def user_container(self, frostfs_cli: FrostfsCli, default_wallet: WalletInfo, cluster: Cluster) -> str:
-        with reporter.step("Create container"):
-            cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, self.PLACEMENT_RULE, PUBLIC_ACL)
-        with reporter.step("Deny PUT via APE rule to container"):
-            role_condition = ape.Condition.by_role(ape.Role.OWNER)
-            rule = ape.Rule(ape.Verb.DENY, ape.ObjectOperations.PUT, role_condition)
-            frostfs_cli.ape_manager.add(
-                cluster.default_rpc_endpoint, rule.chain_id, target_name=cid, target_type="container", rule=rule.as_string()
-            )
-        with reporter.step("Wait for one block"):
-            self.wait_for_blocks()
-        return cid
+    @pytest.fixture(scope="class", autouse=True)
+    @allure.title("[Class/Autouse]: Prepare wallet and deposit")
+    def prepare_wallet(self, default_wallet):
+        Test_http_bearer.wallet = default_wallet
+
+    @pytest.fixture(scope="class")
+    def user_container(self) -> str:
+        return create_container(
+            wallet=self.wallet,
+            shell=self.shell,
+            endpoint=self.cluster.default_rpc_endpoint,
+            rule=self.PLACEMENT_RULE,
+            basic_acl=PUBLIC_ACL,
+        )
+
+    @pytest.fixture(scope="class")
+    def eacl_deny_for_others(self, user_container: str) -> None:
+        with reporter.step(f"Set deny all operations for {EACLRole.OTHERS} via eACL"):
+            eacl = EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=EACLOperation.PUT)
+            set_eacl(
+                self.wallet,
+                user_container,
+                create_eacl(user_container, eacl, shell=self.shell),
+                shell=self.shell,
+                endpoint=self.cluster.default_rpc_endpoint,
+            )
+            wait_for_cache_expired()

     @pytest.fixture(scope="class")
-    def bearer_token(self, frostfs_cli: FrostfsCli, user_container: str, temp_directory: str, cluster: Cluster) -> str:
-        with reporter.step(f"Create bearer token for {ape.Role.OTHERS} with all operations allowed"):
-            role_condition = ape.Condition.by_role(ape.Role.OTHERS)
-            rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL, role_condition)
-            bearer = create_bearer_token(frostfs_cli, temp_directory, user_container, rule, cluster.default_rpc_endpoint)
-        return bearer_token_base64_from_file(bearer)
+    def bearer_token_no_limit_for_others(self, user_container: str) -> str:
+        with reporter.step(f"Create bearer token for {EACLRole.OTHERS} with all operations allowed"):
+            bearer = form_bearertoken_file(
+                self.wallet,
+                user_container,
+                [EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS) for op in EACLOperation],
+                shell=self.shell,
+                endpoint=self.cluster.default_rpc_endpoint,
+                sign=False,
+            )
+            bearer_signed = f"{bearer}_signed"
+            sign_bearer(
+                shell=self.shell,
+                wallet=self.wallet,
+                eacl_rules_file_from=bearer,
+                eacl_rules_file_to=bearer_signed,
+                json=False,
+            )
+            return bearer_token_base64_from_file(bearer_signed)

-    @allure.title(f"[NEGATIVE] Put object without bearer token for {ape.Role.OTHERS}")
-    def test_unable_put_without_bearer_token(self, simple_object_size: ObjectSize, user_container: str):
+    @allure.title(f"[NEGATIVE] Put object without bearer token for {EACLRole.OTHERS}")
+    def test_unable_put_without_bearer_token(
+        self, simple_object_size: ObjectSize, user_container: str, eacl_deny_for_others
+    ):
+        eacl_deny_for_others
         upload_via_http_gate_curl(
             cid=user_container,
             filepath=generate_file(simple_object_size.value),
@@ -60,17 +89,18 @@ class Test_http_bearer(ClusterTestBase):
             error_pattern="access to object operation denied",
         )

-    @allure.title("Put object via HTTP using bearer token (object_size={object_size})")
     def test_put_with_bearer_when_eacl_restrict(
         self,
         object_size: ObjectSize,
-        default_wallet: WalletInfo,
         user_container: str,
-        bearer_token: str,
+        eacl_deny_for_others,
+        bearer_token_no_limit_for_others: str,
     ):
+        eacl_deny_for_others
+        bearer = bearer_token_no_limit_for_others
         file_path = generate_file(object_size.value)
-        with reporter.step(f"Put object with bearer token for {ape.Role.OTHERS}, then get and verify hashes"):
-            headers = [f" -H 'Authorization: Bearer {bearer_token}'"]
+        with reporter.step(f"Put object with bearer token for {EACLRole.OTHERS}, then get and verify hashes"):
+            headers = [f" -H 'Authorization: Bearer {bearer}'"]
             oid = upload_via_http_gate_curl(
                 cid=user_container,
                 filepath=file_path,
@@ -80,7 +110,7 @@ class Test_http_bearer(ClusterTestBase):
         verify_object_hash(
             oid=oid,
             file_name=file_path,
-            wallet=default_wallet,
+            wallet=self.wallet,
             cid=user_container,
             shell=self.shell,
             nodes=self.cluster.storage_nodes,

View file

@@ -20,7 +20,7 @@ from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
-from ....helpers.utility import wait_for_gc_pass_on_storage_nodes
+from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes

 OBJECT_NOT_FOUND_ERROR = "not found"
@@ -31,7 +31,6 @@ OBJECT_NOT_FOUND_ERROR = "not found"
 )
 @allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#uploading", name="uploading")
 @allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading", name="downloading")
-@pytest.mark.nightly
 @pytest.mark.sanity
 @pytest.mark.http_gate
 class TestHttpGate(ClusterTestBase):
@@ -103,6 +102,7 @@ class TestHttpGate(ClusterTestBase):
 )
 @allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#uploading", name="uploading")
 @allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading", name="downloading")
+@pytest.mark.skip("Skipped temporarily")
 @pytest.mark.http_gate
 @pytest.mark.http_put
 class TestHttpPut(ClusterTestBase):
@@ -134,8 +134,12 @@ class TestHttpPut(ClusterTestBase):
         file_path_large = generate_file(complex_object_size.value)

         with reporter.step("Put objects using HTTP"):
-            oid_simple = upload_via_http_gate(cid=cid, path=file_path_simple, endpoint=self.cluster.default_http_gate_endpoint)
-            oid_large = upload_via_http_gate(cid=cid, path=file_path_large, endpoint=self.cluster.default_http_gate_endpoint)
+            oid_simple = upload_via_http_gate(
+                cid=cid, path=file_path_simple, endpoint=self.cluster.default_http_gate_endpoint
+            )
+            oid_large = upload_via_http_gate(
+                cid=cid, path=file_path_large, endpoint=self.cluster.default_http_gate_endpoint
+            )

         for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
             verify_object_hash(
@@ -350,7 +354,9 @@ class TestHttpPut(ClusterTestBase):
         file_path_large = generate_file(complex_object_size.value)

         with reporter.step("Put objects using curl utility"):
-            oid_simple = upload_via_http_gate_curl(cid=cid, filepath=file_path_simple, endpoint=self.cluster.default_http_gate_endpoint)
+            oid_simple = upload_via_http_gate_curl(
+                cid=cid, filepath=file_path_simple, endpoint=self.cluster.default_http_gate_endpoint
+            )
             oid_large = upload_via_http_gate_curl(
                 cid=cid,
                 filepath=file_path_large,

View file

@@ -28,6 +28,7 @@ OBJECT_ALREADY_REMOVED_ERROR = "object already removed"

 logger = logging.getLogger("NeoLogger")

+@pytest.mark.skip("Skipped temporarily")
 @pytest.mark.http_gate
 @pytest.mark.http_put
 class Test_http_headers(ClusterTestBase):

View file

@@ -22,7 +22,6 @@ from frostfs_testlib.utils.file_utils import generate_file

 logger = logging.getLogger("NeoLogger")

-@pytest.mark.nightly
 @pytest.mark.sanity
 @pytest.mark.http_gate
 class Test_http_object(ClusterTestBase):
@@ -126,7 +125,7 @@ class Test_http_object(ClusterTestBase):
             http_request_path=request,
         )

-    @allure.title("Put over s3, Get over HTTP with bucket name and key (object_size={object_size})")
+    @allure.title("Put over s3, Get over HTTP with bucket name and key")
     @pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True)
     def test_object_put_get_bucketname_key(self, object_size: ObjectSize, s3_client: S3ClientWrapper):
         """

View file

@@ -13,6 +13,7 @@ from frostfs_testlib.utils.file_utils import generate_file

 logger = logging.getLogger("NeoLogger")

+@pytest.mark.skip("Skipped temporarily")
 @pytest.mark.http_gate
 @pytest.mark.http_put
 class Test_http_streaming(ClusterTestBase):

View file

@@ -32,6 +32,7 @@ SYSTEM_EXPIRATION_TIMESTAMP = "System-Expiration-Timestamp"
 SYSTEM_EXPIRATION_RFC3339 = "System-Expiration-RFC3339"

+@pytest.mark.skip("Skipped temporarily")
 @pytest.mark.http_gate
 @pytest.mark.http_put
 class Test_http_system_header(ClusterTestBase):

View file

@@ -1,64 +1,62 @@
 import allure
 import pytest
 from frostfs_testlib import reporter
-from frostfs_testlib.resources.error_patterns import S3_BUCKET_DOES_NOT_ALLOW_ACL
-from frostfs_testlib.resources.s3_acl_grants import PRIVATE_GRANTS, PUBLIC_READ_GRANTS, PUBLIC_READ_WRITE_GRANTS
-from frostfs_testlib.s3 import S3ClientWrapper
+from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
 from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.utils.file_utils import generate_file

-@pytest.mark.nightly
 @pytest.mark.acl
 @pytest.mark.s3_gate
 class TestS3GateACL:
     @allure.title("Object ACL (s3_client={s3_client})")
+    @pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True)
     def test_s3_object_ACL(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
         file_path = generate_file(simple_object_size.value)
         file_name = s3_helper.object_key_from_file_path(file_path)

-        with reporter.step("Put object into bucket"):
+        with reporter.step("Put object into bucket, Check ACL is empty"):
             s3_client.put_object(bucket, file_path)
+            obj_acl = s3_client.get_object_acl(bucket, file_name)
+            assert obj_acl == [], f"Expected ACL is empty, got {obj_acl}"

-        with reporter.step("Verify private ACL is default"):
-            object_grants = s3_client.get_object_acl(bucket, file_name)
-            s3_helper.verify_acl_permissions(object_grants, PRIVATE_GRANTS)
+        with reporter.step("Put object ACL = public-read"):
+            s3_client.put_object_acl(bucket, file_name, "public-read")
+            obj_acl = s3_client.get_object_acl(bucket, file_name)
+            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")

-        with reporter.step("Verify put object ACL is restricted"):
-            with pytest.raises(Exception, match=S3_BUCKET_DOES_NOT_ALLOW_ACL):
-                object_grants = s3_client.put_object_acl(bucket, file_name, acl="public-read")
+        with reporter.step("Put object ACL = private"):
+            s3_client.put_object_acl(bucket, file_name, "private")
+            obj_acl = s3_client.get_object_acl(bucket, file_name)
+            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser")

-    @allure.title("Create Bucket with different ACL (s3_client={s3_client})")
-    def test_s3_create_bucket_with_ACL(self, s3_client: S3ClientWrapper):
-        with reporter.step("Create bucket with ACL private"):
-            bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="private")
-            bucket_grants = s3_client.get_bucket_acl(bucket)
-            s3_helper.verify_acl_permissions(bucket_grants, PRIVATE_GRANTS)
-
-        with reporter.step("Create bucket with ACL public-read"):
-            read_bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read")
-            bucket_grants = s3_client.get_bucket_acl(read_bucket)
-            s3_helper.verify_acl_permissions(bucket_grants, PUBLIC_READ_GRANTS)
-
-        with reporter.step("Create bucket with ACL public-read-write"):
-            public_rw_bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read-write")
-            bucket_grants = s3_client.get_bucket_acl(public_rw_bucket)
-            s3_helper.verify_acl_permissions(bucket_grants, PUBLIC_READ_WRITE_GRANTS)
+        with reporter.step("Put object with grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers"):
+            s3_client.put_object_acl(
+                bucket,
+                file_name,
+                grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers",
+            )
+            obj_acl = s3_client.get_object_acl(bucket, file_name)
+            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")

     @allure.title("Bucket ACL (s3_client={s3_client})")
+    @pytest.mark.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
     def test_s3_bucket_ACL(self, s3_client: S3ClientWrapper):
-        with reporter.step("Create bucket with public-read-write ACL"):
+        with reporter.step("Create bucket with ACL = public-read-write"):
             bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read-write")
-            bucket_grants = s3_client.get_bucket_acl(bucket)
-            s3_helper.verify_acl_permissions(bucket_grants, PUBLIC_READ_WRITE_GRANTS)
+            bucket_acl = s3_client.get_bucket_acl(bucket)
+            s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers")

         with reporter.step("Change bucket ACL to private"):
             s3_client.put_bucket_acl(bucket, acl="private")
-            bucket_grants = s3_client.get_bucket_acl(bucket)
-            s3_helper.verify_acl_permissions(bucket_grants, PRIVATE_GRANTS)
+            bucket_acl = s3_client.get_bucket_acl(bucket)
+            s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="CanonicalUser")

-        with reporter.step("Change bucket ACL to public-read"):
-            s3_client.put_bucket_acl(bucket, acl="public-read")
-            bucket_grants = s3_client.get_bucket_acl(bucket)
-            s3_helper.verify_acl_permissions(bucket_grants, PUBLIC_READ_GRANTS)
+        with reporter.step("Change bucket acl to --grant-write uri=http://acs.amazonaws.com/groups/global/AllUsers"):
+            s3_client.put_bucket_acl(
+                bucket,
+                grant_write="uri=http://acs.amazonaws.com/groups/global/AllUsers",
+            )
+            bucket_acl = s3_client.get_bucket_acl(bucket)
+            s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers")
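
The reverted assertions above encode the usual S3 canned-ACL semantics: "private" leaves only the owner's CanonicalUser grant, while "public-read", "public-read-write", and the explicit URI grants surface an AllUsers group grantee. A compact restatement of the expectations the tests check via assert_s3_acl (a sketch of the expected mapping, not library code):

# Expected grantee group per canned ACL, as asserted in the tests above.
CANNED_ACL_EXPECTED_GRANTEE = {
    "private": "CanonicalUser",       # owner-only FULL_CONTROL
    "public-read": "AllUsers",        # everyone may read
    "public-read-write": "AllUsers",  # everyone may read and write
}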

View file

@ -1,97 +1,66 @@
import string
from datetime import datetime, timedelta from datetime import datetime, timedelta
import allure import allure
import pytest import pytest
from frostfs_testlib import reporter from frostfs_testlib import reporter
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus from frostfs_testlib.s3 import S3ClientWrapper
from frostfs_testlib.steps.s3 import s3_helper from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils import string_utils
from frostfs_testlib.utils.file_utils import generate_file from frostfs_testlib.utils.file_utils import generate_file
VALID_SYMBOLS_WITHOUT_DOT = string.ascii_lowercase + string.digits + "-"
VALID_AND_INVALID_SYMBOLS = string.ascii_letters + string.punctuation
# TODO: The dot symbol is temporarily not supported.
VALID_SYMBOLS_WITH_DOT = VALID_SYMBOLS_WITHOUT_DOT + "."
@pytest.mark.nightly
@pytest.mark.s3_gate @pytest.mark.s3_gate
@pytest.mark.s3_gate_bucket @pytest.mark.s3_gate_bucket
class TestS3GateBucket: class TestS3GateBucket:
@allure.title("Bucket API (s3_client={s3_client})") @allure.title("Create Bucket with different ACL (s3_client={s3_client})")
def test_s3_buckets( def test_s3_create_bucket_with_ACL(self, s3_client: S3ClientWrapper):
self,
s3_client: S3ClientWrapper,
simple_object_size: ObjectSize,
):
"""
Test base S3 Bucket API (Create/List/Head/Delete).
"""
file_path = generate_file(simple_object_size.value) with reporter.step("Create bucket with ACL private"):
file_name = s3_helper.object_key_from_file_path(file_path) bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="private")
bucket_acl = s3_client.get_bucket_acl(bucket)
s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="CanonicalUser")
with reporter.step("Create buckets"): with reporter.step("Create bucket with ACL = public-read"):
bucket_1 = s3_client.create_bucket(object_lock_enabled_for_bucket=True) bucket_1 = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read")
s3_helper.set_bucket_versioning(s3_client, bucket_1, VersioningStatus.ENABLED) bucket_acl_1 = s3_client.get_bucket_acl(bucket_1)
bucket_2 = s3_client.create_bucket() s3_helper.assert_s3_acl(acl_grants=bucket_acl_1, permitted_users="AllUsers")
with reporter.step("Check buckets are presented in the system"): with reporter.step("Create bucket with ACL public-read-write"):
buckets = s3_client.list_buckets() bucket_2 = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read-write")
assert bucket_1 in buckets, f"Expected bucket {bucket_1} is in the list" bucket_acl_2 = s3_client.get_bucket_acl(bucket_2)
assert bucket_2 in buckets, f"Expected bucket {bucket_2} is in the list" s3_helper.assert_s3_acl(acl_grants=bucket_acl_2, permitted_users="AllUsers")
with reporter.step("Bucket must be empty"): with reporter.step("Create bucket with ACL = authenticated-read"):
for bucket in (bucket_1, bucket_2): bucket_3 = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="authenticated-read")
with reporter.step("Verify default list command"): bucket_acl_3 = s3_client.get_bucket_acl(bucket_3)
objects_list = s3_client.list_objects(bucket) s3_helper.assert_s3_acl(acl_grants=bucket_acl_3, permitted_users="AllUsers")
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with reporter.step("Verify V2 list command"): @allure.title("Create Bucket with different ACL by grant (s3_client={s3_client})")
objects_list = s3_client.list_objects_v2(bucket) def test_s3_create_bucket_with_grands(self, s3_client: S3ClientWrapper):
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with reporter.step("Check buckets are visible with S3 head command"): with reporter.step("Create bucket with --grant-read"):
s3_client.head_bucket(bucket_1) bucket = s3_client.create_bucket(
s3_client.head_bucket(bucket_2) object_lock_enabled_for_bucket=True,
grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)
bucket_acl = s3_client.get_bucket_acl(bucket)
s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers")
with reporter.step("Check we can put/list object with S3 commands"): with reporter.step("Create bucket with --grant-wtite"):
version_id = s3_client.put_object(bucket_1, file_path) bucket_1 = s3_client.create_bucket(
s3_client.head_object(bucket_1, file_name) object_lock_enabled_for_bucket=True,
grant_write="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)
bucket_acl_1 = s3_client.get_bucket_acl(bucket_1)
s3_helper.assert_s3_acl(acl_grants=bucket_acl_1, permitted_users="AllUsers")
bucket_objects = s3_client.list_objects(bucket_1) with reporter.step("Create bucket with --grant-full-control"):
assert file_name in bucket_objects, f"Expected file {file_name} in objects list {bucket_objects}" bucket_2 = s3_client.create_bucket(
object_lock_enabled_for_bucket=True,
with reporter.step("Try to delete not empty bucket and get error"): grant_full_control="uri=http://acs.amazonaws.com/groups/global/AllUsers",
with pytest.raises(Exception, match=r".*The bucket you tried to delete is not empty.*"): )
s3_client.delete_bucket(bucket_1) bucket_acl_2 = s3_client.get_bucket_acl(bucket_2)
s3_helper.assert_s3_acl(acl_grants=bucket_acl_2, permitted_users="AllUsers")
s3_client.head_bucket(bucket_1)
with reporter.step("Delete empty bucket_2"):
s3_client.delete_bucket(bucket_2)
with reporter.step("Check bucket_2 is deleted"):
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_client.head_bucket(bucket_2)
buckets = s3_client.list_buckets()
assert bucket_1 in buckets, f"Expected bucket {bucket_1} is in the list"
assert bucket_2 not in buckets, f"Expected bucket {bucket_2} is not in the list"
with reporter.step("Delete object from bucket_1"):
s3_client.delete_object(bucket_1, file_name, version_id)
s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=[])
with reporter.step("Delete bucket_1"):
s3_client.delete_bucket(bucket_1)
with reporter.step("Check bucket_1 deleted"):
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_client.head_bucket(bucket_1)
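For orientation, the canned-ACL arguments above are thin wrappers over the plain S3 CreateBucket call. A minimal boto3 sketch of the same check; the bucket name is a placeholder and the endpoint is illustrative (taken from the devenv config):

import boto3

s3 = boto3.client("s3", endpoint_url="https://s3.frostfs.devenv:8080")

s3.create_bucket(Bucket="demo-bucket", ACL="public-read", ObjectLockEnabledForBucket=True)
acl = s3.get_bucket_acl(Bucket="demo-bucket")
# Public access shows up as grants to the AllUsers group URI.
assert any(grant["Grantee"].get("URI", "").endswith("AllUsers") for grant in acl["Grants"])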
@allure.title("Create bucket with object lock (s3_client={s3_client})") @allure.title("Create bucket with object lock (s3_client={s3_client})")
def test_s3_bucket_object_lock(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize): def test_s3_bucket_object_lock(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize):
@ -148,87 +117,3 @@ class TestS3GateBucket:
s3_client.delete_bucket(bucket) s3_client.delete_bucket(bucket)
with pytest.raises(Exception, match=r".*Not Found.*"): with pytest.raises(Exception, match=r".*Not Found.*"):
s3_client.head_bucket(bucket) s3_client.head_bucket(bucket)
@allure.title("Create bucket with valid name length (s3_client={s3_client}, length={length})")
@pytest.mark.parametrize("length", [3, 4, 32, 62, 63])
def test_s3_create_bucket_with_valid_length(self, s3_client: S3ClientWrapper, length: int):
bucket_name = string_utils.random_string(length, VALID_SYMBOLS_WITHOUT_DOT)
while not (bucket_name[0].isalnum() and bucket_name[-1].isalnum()):
bucket_name = string_utils.random_string(length, VALID_SYMBOLS_WITHOUT_DOT)
with reporter.step("Create bucket with valid name length"):
s3_client.create_bucket(bucket_name)
with reporter.step("Check bucket name in buckets"):
assert bucket_name in s3_client.list_buckets()
@allure.title("[NEGATIVE] Bucket with invalid name length should not be created (s3_client={s3_client}, length={length})")
@pytest.mark.parametrize("length", [2, 64, 254, 255, 256])
def test_s3_create_bucket_with_invalid_length(self, s3_client: S3ClientWrapper, length: int):
bucket_name = string_utils.random_string(length, VALID_SYMBOLS_WITHOUT_DOT)
while not (bucket_name[0].isalnum() and bucket_name[-1].isalnum()):
bucket_name = string_utils.random_string(length, VALID_SYMBOLS_WITHOUT_DOT)
with reporter.step("Create bucket with invalid name length and catch exception"):
with pytest.raises(Exception, match=".*(?:InvalidBucketName|Invalid bucket name).*"):
s3_client.create_bucket(bucket_name)
@allure.title("[NEGATIVE] Bucket with invalid name should not be created (s3_client={s3_client}, bucket_name={bucket_name})")
@pytest.mark.parametrize(
"bucket_name",
[
"BUCKET-1",
"buckeT-2",
# The following case for AWS CLI is not handled correctly
# "-bucket-3",
"bucket-4-",
".bucket-5",
"bucket-6.",
"bucket..7",
"bucket+8",
"bucket_9",
"bucket 10",
"127.10.5.11",
"xn--bucket-12",
"bucket-13-s3alias",
# The following names can be used in FrostFS but are prohibited by the AWS specification.
# "sthree-bucket-14"
# "sthree-configurator-bucket-15"
# "amzn-s3-demo-bucket-16"
# "sthree-bucket-17"
# "bucket-18--ol-s3"
# "bucket-19--x-s3"
# "bucket-20.mrap"
],
)
def test_s3_create_bucket_with_invalid_name(self, s3_client: S3ClientWrapper, bucket_name: str):
with reporter.step("Create bucket with invalid name and catch exception"):
with pytest.raises(Exception, match=".*(?:InvalidBucketName|Invalid bucket name).*"):
s3_client.create_bucket(bucket_name)
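A rough sketch of the naming rules these negative cases exercise. This is an approximation for illustration, not the gateway's actual validator:

import re

def is_valid_bucket_name(name: str) -> bool:
    # 3-63 characters, lowercase letters, digits, hyphens (and dots, when supported).
    if not 3 <= len(name) <= 63:
        return False
    # Must start and end with a letter or digit.
    if not re.fullmatch(r"[a-z0-9][a-z0-9.-]*[a-z0-9]", name):
        return False
    if ".." in name:
        return False
    # Must not look like an IP address.
    if re.fullmatch(r"(\d{1,3}\.){3}\d{1,3}", name):
        return False
    # Reserved prefixes/suffixes from the AWS specification.
    if name.startswith("xn--") or name.endswith("-s3alias"):
        return False
    return True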
@allure.title("[NEGATIVE] Delete non-empty bucket (s3_client={s3_client})")
def test_s3_check_availability_non_empty_bucket_after_deleting(
self,
bucket: str,
simple_object_size: ObjectSize,
s3_client: S3ClientWrapper,
):
object_path = generate_file(simple_object_size.value)
object_name = s3_helper.object_key_from_file_path(object_path)
with reporter.step("Put object into bucket"):
s3_client.put_object(bucket, object_path)
with reporter.step("Check that object appears in bucket"):
objects = s3_client.list_objects(bucket)
assert objects, f"Expected bucket with object, got empty {objects}"
assert object_name in objects, f"Object {object_name} not found in bucket object list {objects}"
with reporter.step("Try to delete not empty bucket and get error"):
with pytest.raises(Exception, match=r".*The bucket you tried to delete is not empty.*"):
s3_client.delete_bucket(bucket)
with reporter.step("Check bucket availability"):
objects = s3_client.list_objects(bucket)
assert objects, f"Expected bucket with object, got empty {objects}"
assert object_name in objects, f"Object {object_name} not found in bucket object list {objects}"


@@ -0,0 +1,515 @@
import logging
import os
from random import choice, choices
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.epoch import tick_epoch
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import (
generate_file,
generate_file_with_content,
get_file_content,
get_file_hash,
split_file,
)
logger = logging.getLogger("NeoLogger")
@allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw#frostfs-s3-gw", name="frostfs-s3-gateway")
@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.s3_gate_base
class TestS3Gate:
@allure.title("Bucket API (s3_client={s3_client})")
def test_s3_buckets(
self,
s3_client: S3ClientWrapper,
client_shell: Shell,
cluster: Cluster,
simple_object_size: ObjectSize,
):
"""
Test base S3 Bucket API (Create/List/Head/Delete).
"""
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
with reporter.step("Create buckets"):
bucket_1 = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
s3_helper.set_bucket_versioning(s3_client, bucket_1, VersioningStatus.ENABLED)
bucket_2 = s3_client.create_bucket()
with reporter.step("Check buckets are presented in the system"):
buckets = s3_client.list_buckets()
assert bucket_1 in buckets, f"Expected bucket {bucket_1} is in the list"
assert bucket_2 in buckets, f"Expected bucket {bucket_2} is in the list"
with reporter.step("Bucket must be empty"):
for bucket in (bucket_1, bucket_2):
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with reporter.step("Check buckets are visible with S3 head command"):
s3_client.head_bucket(bucket_1)
s3_client.head_bucket(bucket_2)
with reporter.step("Check we can put/list object with S3 commands"):
version_id = s3_client.put_object(bucket_1, file_path)
s3_client.head_object(bucket_1, file_name)
bucket_objects = s3_client.list_objects(bucket_1)
assert file_name in bucket_objects, f"Expected file {file_name} in objects list {bucket_objects}"
with reporter.step("Try to delete not empty bucket and get error"):
with pytest.raises(Exception, match=r".*The bucket you tried to delete is not empty.*"):
s3_client.delete_bucket(bucket_1)
s3_client.head_bucket(bucket_1)
with reporter.step(f"Delete empty bucket {bucket_2}"):
s3_client.delete_bucket(bucket_2)
tick_epoch(client_shell, cluster)
with reporter.step(f"Check bucket {bucket_2} deleted"):
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_client.head_bucket(bucket_2)
buckets = s3_client.list_buckets()
assert bucket_1 in buckets, f"Expected bucket {bucket_1} is in the list"
assert bucket_2 not in buckets, f"Expected bucket {bucket_2} is not in the list"
with reporter.step(f"Delete object from {bucket_1}"):
s3_client.delete_object(bucket_1, file_name, version_id)
s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=[])
with reporter.step(f"Delete bucket {bucket_1}"):
s3_client.delete_bucket(bucket_1)
tick_epoch(client_shell, cluster)
with reporter.step(f"Check bucket {bucket_1} deleted"):
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_client.head_bucket(bucket_1)
@allure.title("Object API (obj_size={object_size}, s3_client={s3_client})")
@pytest.mark.parametrize(
"object_size",
["simple", "complex"],
indirect=True,
)
def test_s3_api_object(
self,
s3_client: S3ClientWrapper,
object_size: ObjectSize,
two_buckets: tuple[str, str],
):
"""
Test base S3 Object API (Put/Head/List) for simple and complex objects.
"""
file_path = generate_file(object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_1, bucket_2 = two_buckets
for bucket in (bucket_1, bucket_2):
with reporter.step("Bucket must be empty"):
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
s3_client.put_object(bucket, file_path)
s3_client.head_object(bucket, file_name)
bucket_objects = s3_client.list_objects(bucket)
assert file_name in bucket_objects, f"Expected file {file_name} in objects list {bucket_objects}"
with reporter.step("Check object's attributes"):
for attrs in (["ETag"], ["ObjectSize", "StorageClass"]):
s3_client.get_object_attributes(bucket, file_name, attrs)
@allure.title("Sync directory (s3_client={s3_client})")
@pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True)
def test_s3_sync_dir(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
"""
Test checks sync directory with AWS CLI utility.
"""
file_path_1 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_1")
file_path_2 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_2")
key_to_path = {"test_file_1": file_path_1, "test_file_2": file_path_2}
generate_file_with_content(simple_object_size.value, file_path=file_path_1)
generate_file_with_content(simple_object_size.value, file_path=file_path_2)
s3_client.sync(bucket=bucket, dir_path=os.path.dirname(file_path_1))
with reporter.step("Check objects are synced"):
objects = s3_client.list_objects(bucket)
with reporter.step("Check these are the same objects"):
assert set(key_to_path.keys()) == set(objects), f"Expected all objects saved. Got {objects}"
for obj_key in objects:
got_object = s3_client.get_object(bucket, obj_key)
assert get_file_hash(got_object) == get_file_hash(
key_to_path.get(obj_key)
), "Expected hashes are the same"
@allure.title("Object versioning (s3_client={s3_client})")
def test_s3_api_versioning(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
"""
Test checks basic versioning functionality for S3 bucket.
"""
version_1_content = "Version 1"
version_2_content = "Version 2"
file_name_simple = generate_file_with_content(simple_object_size.value, content=version_1_content)
obj_key = os.path.basename(file_name_simple)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with reporter.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_name_simple)
generate_file_with_content(simple_object_size.value, file_path=file_name_simple, content=version_2_content)
version_id_2 = s3_client.put_object(bucket, file_name_simple)
with reporter.step("Check bucket shows all versions"):
versions = s3_client.list_objects_versions(bucket)
obj_versions = {version.get("VersionId") for version in versions if version.get("Key") == obj_key}
assert obj_versions == {
version_id_1,
version_id_2,
}, f"Expected object has versions: {version_id_1, version_id_2}"
with reporter.step("Show information about particular version"):
for version_id in (version_id_1, version_id_2):
response = s3_client.head_object(bucket, obj_key, version_id=version_id)
assert "LastModified" in response, "Expected LastModified field"
assert "ETag" in response, "Expected ETag field"
assert response.get("VersionId") == version_id, f"Expected VersionId is {version_id}"
assert response.get("ContentLength") != 0, "Expected ContentLength is not zero"
with reporter.step("Check object's attributes"):
for version_id in (version_id_1, version_id_2):
got_attrs = s3_client.get_object_attributes(bucket, obj_key, ["ETag"], version_id=version_id)
if got_attrs:
assert got_attrs.get("VersionId") == version_id, f"Expected VersionId is {version_id}"
with reporter.step("Delete object and check it was deleted"):
response = s3_client.delete_object(bucket, obj_key)
version_id_delete = response.get("VersionId")
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_client.head_object(bucket, obj_key)
with reporter.step("Get content for all versions and check it is correct"):
for version, content in (
(version_id_2, version_2_content),
(version_id_1, version_1_content),
):
file_name = s3_client.get_object(bucket, obj_key, version_id=version)
got_content = get_file_content(file_name)
assert got_content == content, f"Expected object content is\n{content}\nGot\n{got_content}"
with reporter.step("Restore previous object version"):
s3_client.delete_object(bucket, obj_key, version_id=version_id_delete)
file_name = s3_client.get_object(bucket, obj_key)
got_content = get_file_content(file_name)
assert (
got_content == version_2_content
), f"Expected object content is\n{version_2_content}\nGot\n{got_content}"
@pytest.mark.s3_gate_multipart
@allure.title("Object Multipart API (s3_client={s3_client})")
def test_s3_api_multipart(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
"""
Test checks S3 Multipart API (Create multipart upload/Abort multipart upload/List multipart upload/
Upload part/List parts/Complete multipart upload).
"""
parts_count = 3
file_name_large = generate_file(simple_object_size.value * 1024 * 6 * parts_count) # 5Mb - min part
object_key = s3_helper.object_key_from_file_path(file_name_large)
part_files = split_file(file_name_large, parts_count)
parts = []
uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected there is no uploads in bucket {bucket}"
with reporter.step("Create and abort multipart upload"):
upload_id = s3_client.create_multipart_upload(bucket, object_key)
uploads = s3_client.list_multipart_uploads(bucket)
assert uploads, f"Expected there one upload in bucket {bucket}"
assert uploads[0].get("Key") == object_key, f"Expected correct key {object_key} in upload {uploads}"
assert uploads[0].get("UploadId") == upload_id, f"Expected correct UploadId {upload_id} in upload {uploads}"
s3_client.abort_multipart_upload(bucket, object_key, upload_id)
uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected there is no uploads in bucket {bucket}"
with reporter.step("Create new multipart upload and upload several parts"):
upload_id = s3_client.create_multipart_upload(bucket, object_key)
for part_id, file_path in enumerate(part_files, start=1):
etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
parts.append((part_id, etag))
with reporter.step("Check all parts are visible in bucket"):
got_parts = s3_client.list_parts(bucket, object_key, upload_id)
assert len(got_parts) == len(part_files), f"Expected {parts_count} parts, got\n{got_parts}"
s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected there is no uploads in bucket {bucket}"
with reporter.step("Check we can get whole object from bucket"):
got_object = s3_client.get_object(bucket, object_key)
assert get_file_hash(got_object) == get_file_hash(file_name_large)
self.check_object_attributes(s3_client, bucket, object_key, parts_count)
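The file sizing above exists because of the multipart minimum-part-size rule: every part except the last must be at least 5 MiB, otherwise CompleteMultipartUpload is rejected (on AWS, with EntityTooSmall). A small sanity-check sketch; the helper is illustrative, not part of the testlib:

MIN_PART_SIZE = 5 * 1024 * 1024

def split_sizes(total: int, parts_count: int) -> list[int]:
    # Evenly sized parts, with the remainder folded into the final part.
    base = total // parts_count
    sizes = [base] * (parts_count - 1) + [total - base * (parts_count - 1)]
    assert all(size >= MIN_PART_SIZE for size in sizes[:-1]), "non-final part too small"
    return sizes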
@allure.title("Bucket tagging API (s3_client={s3_client})")
def test_s3_api_bucket_tagging(self, s3_client: S3ClientWrapper, bucket: str):
"""
Test checks S3 Bucket tagging API (Put tag/Get tag).
"""
key_value_pair = [("some-key", "some-value"), ("some-key-2", "some-value-2")]
s3_client.put_bucket_tagging(bucket, key_value_pair)
s3_helper.check_tags_by_bucket(s3_client, bucket, key_value_pair)
s3_client.delete_bucket_tagging(bucket)
s3_helper.check_tags_by_bucket(s3_client, bucket, [])
@allure.title("Object tagging API (s3_client={s3_client})")
def test_s3_api_object_tagging(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
"""
Test checks S3 Object tagging API (Put tag/Get tag/Update tag).
"""
key_value_pair_bucket = [("some-key", "some-value"), ("some-key-2", "some-value-2")]
key_value_pair_obj = [
("some-key-obj", "some-value-obj"),
("some-key--obj2", "some-value--obj2"),
]
key_value_pair_obj_new = [("some-key-obj-new", "some-value-obj-new")]
file_name_simple = generate_file(simple_object_size.value)
obj_key = s3_helper.object_key_from_file_path(file_name_simple)
s3_client.put_bucket_tagging(bucket, key_value_pair_bucket)
s3_client.put_object(bucket, file_name_simple)
for tags in (key_value_pair_obj, key_value_pair_obj_new):
s3_client.put_object_tagging(bucket, obj_key, tags)
s3_helper.check_tags_by_object(
s3_client,
bucket,
obj_key,
tags,
)
s3_client.delete_object_tagging(bucket, obj_key)
s3_helper.check_tags_by_object(s3_client, bucket, obj_key, [])
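For reference, the (key, value) pairs correspond to the S3 TagSet payload; a sketch of the mapping the wrapper presumably performs before calling PutObjectTagging:

key_value_pair = [("some-key-obj", "some-value-obj"), ("some-key--obj2", "some-value--obj2")]
tagging = {"TagSet": [{"Key": key, "Value": value} for key, value in key_value_pair]}
# e.g. with boto3: s3.put_object_tagging(Bucket="demo-bucket", Key="demo-key", Tagging=tagging)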
@allure.title("Delete object & delete objects (s3_client={s3_client})")
def test_s3_api_delete(
self,
s3_client: S3ClientWrapper,
two_buckets: tuple[str, str],
simple_object_size: ObjectSize,
complex_object_size: ObjectSize,
):
"""
Check the delete_object and delete_objects S3 API operations. From the first bucket, objects are deleted
one by one; from the second bucket, several objects are deleted at once.
"""
max_obj_count = 20
max_delete_objects = 17
put_objects = []
file_paths = []
obj_sizes = [simple_object_size, complex_object_size]
bucket_1, bucket_2 = two_buckets
with reporter.step(f"Generate {max_obj_count} files"):
for _ in range(max_obj_count):
file_paths.append(generate_file(choice(obj_sizes).value))
for bucket in (bucket_1, bucket_2):
with reporter.step(f"Bucket {bucket} must be empty as it just created"):
objects_list = s3_client.list_objects_v2(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
for file_path in file_paths:
s3_client.put_object(bucket, file_path)
put_objects.append(s3_helper.object_key_from_file_path(file_path))
with reporter.step(f"Check all objects put in bucket {bucket} successfully"):
bucket_objects = s3_client.list_objects_v2(bucket)
assert set(put_objects) == set(
bucket_objects
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
with reporter.step("Delete some objects from bucket_1 one by one"):
objects_to_delete_b1 = choices(put_objects, k=max_delete_objects)
for obj in objects_to_delete_b1:
s3_client.delete_object(bucket_1, obj)
with reporter.step("Check deleted objects are not visible in bucket bucket_1"):
bucket_objects = s3_client.list_objects_v2(bucket_1)
assert set(put_objects).difference(set(objects_to_delete_b1)) == set(
bucket_objects
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
for object_key in objects_to_delete_b1:
with pytest.raises(Exception, match="The specified key does not exist"):
s3_client.get_object(bucket_1, object_key)
with reporter.step("Delete some objects from bucket_2 at once"):
objects_to_delete_b2 = choices(put_objects, k=max_delete_objects)
s3_client.delete_objects(bucket_2, objects_to_delete_b2)
with reporter.step("Check deleted objects are not visible in bucket bucket_2"):
objects_list = s3_client.list_objects_v2(bucket_2)
assert set(put_objects).difference(set(objects_to_delete_b2)) == set(
objects_list
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
for object_key in objects_to_delete_b2:
with pytest.raises(Exception, match="The specified key does not exist"):
s3_client.get_object(bucket_2, object_key)
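delete_objects() batches all removals into a single DeleteObjects request (on AWS, up to 1000 keys per call). A boto3 sketch of the same call; names and endpoint are placeholders:

import boto3

s3 = boto3.client("s3", endpoint_url="https://s3.frostfs.devenv:8080")
objects_to_delete = ["key-1", "key-2", "key-3"]  # placeholders

s3.delete_objects(
    Bucket="demo-bucket",
    Delete={"Objects": [{"Key": key} for key in objects_to_delete]},
)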
@allure.title("Copy object to the same bucket (s3_client={s3_client})")
def test_s3_copy_same_bucket(
self,
s3_client: S3ClientWrapper,
bucket: str,
complex_object_size: ObjectSize,
simple_object_size: ObjectSize,
):
"""
Test object can be copied to the same bucket.
# TODO: delete after test_s3_copy_object is merged
"""
file_path_simple = generate_file(simple_object_size.value)
file_path_large = generate_file(complex_object_size.value)
file_name_simple = s3_helper.object_key_from_file_path(file_path_simple)
file_name_large = s3_helper.object_key_from_file_path(file_path_large)
bucket_objects = [file_name_simple, file_name_large]
with reporter.step("Bucket must be empty"):
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with reporter.step("Put objects into bucket"):
for file_path in (file_path_simple, file_path_large):
s3_client.put_object(bucket, file_path)
with reporter.step("Copy one object into the same bucket"):
copy_obj_path = s3_client.copy_object(bucket, file_name_simple)
bucket_objects.append(copy_obj_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_objects)
with reporter.step("Check copied object has the same content"):
got_copied_file = s3_client.get_object(bucket, copy_obj_path)
assert get_file_hash(file_path_simple) == get_file_hash(got_copied_file), "Hashes must be the same"
with reporter.step("Delete one object from bucket"):
s3_client.delete_object(bucket, file_name_simple)
bucket_objects.remove(file_name_simple)
s3_helper.check_objects_in_bucket(
s3_client,
bucket,
expected_objects=bucket_objects,
unexpected_objects=[file_name_simple],
)
@allure.title("Copy object to another bucket (s3_client={s3_client})")
def test_s3_copy_to_another_bucket(
self,
s3_client: S3ClientWrapper,
two_buckets: tuple[str, str],
complex_object_size: ObjectSize,
simple_object_size: ObjectSize,
):
"""
Test object can be copied to another bucket.
# TODO: delete after test_s3_copy_object is merged
"""
file_path_simple = generate_file(simple_object_size.value)
file_path_large = generate_file(complex_object_size.value)
file_name_simple = s3_helper.object_key_from_file_path(file_path_simple)
file_name_large = s3_helper.object_key_from_file_path(file_path_large)
bucket_1_objects = [file_name_simple, file_name_large]
bucket_1, bucket_2 = two_buckets
with reporter.step("Buckets must be empty"):
for bucket in (bucket_1, bucket_2):
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with reporter.step("Put objects into one bucket"):
for file_path in (file_path_simple, file_path_large):
s3_client.put_object(bucket_1, file_path)
with reporter.step("Copy object from first bucket into second"):
copy_obj_path_b2 = s3_client.copy_object(bucket_1, file_name_large, bucket=bucket_2)
s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=bucket_1_objects)
s3_helper.check_objects_in_bucket(s3_client, bucket_2, expected_objects=[copy_obj_path_b2])
with reporter.step("Check copied object has the same content"):
got_copied_file_b2 = s3_client.get_object(bucket_2, copy_obj_path_b2)
assert get_file_hash(file_path_large) == get_file_hash(got_copied_file_b2), "Hashes must be the same"
with reporter.step("Delete one object from first bucket"):
s3_client.delete_object(bucket_1, file_name_simple)
bucket_1_objects.remove(file_name_simple)
s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=bucket_1_objects)
s3_helper.check_objects_in_bucket(s3_client, bucket_2, expected_objects=[copy_obj_path_b2])
with reporter.step("Delete one object from second bucket and check it is empty"):
s3_client.delete_object(bucket_2, copy_obj_path_b2)
s3_helper.check_objects_in_bucket(s3_client, bucket_2, expected_objects=[])
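copy_object() addresses the source as a bucket/key pair; a boto3 sketch of the cross-bucket case used above (all names are placeholders):

import boto3

s3 = boto3.client("s3", endpoint_url="https://s3.frostfs.devenv:8080")

s3.copy_object(
    Bucket="target-bucket",
    Key="copied-key",
    CopySource={"Bucket": "source-bucket", "Key": "original-key"},
)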
def check_object_attributes(self, s3_client: S3ClientWrapper, bucket: str, object_key: str, parts_count: int):
if not isinstance(s3_client, AwsCliClient):
logger.warning("Attributes check is not supported for boto3 implementation")
return
with reporter.step("Check object's attributes"):
obj_parts = s3_client.get_object_attributes(bucket, object_key, ["ObjectParts"], full_output=False)
assert obj_parts.get("TotalPartsCount") == parts_count, f"Expected TotalPartsCount is {parts_count}"
assert len(obj_parts.get("Parts")) == parts_count, f"Expected Parts cunt is {parts_count}"
with reporter.step("Check object's attribute max-parts"):
max_parts = 2
obj_parts = s3_client.get_object_attributes(
bucket,
object_key,
["ObjectParts"],
max_parts=max_parts,
full_output=False,
)
assert obj_parts.get("TotalPartsCount") == parts_count, f"Expected TotalPartsCount is {parts_count}"
assert obj_parts.get("MaxParts") == max_parts, f"Expected MaxParts is {parts_count}"
assert len(obj_parts.get("Parts")) == max_parts, f"Expected Parts count is {parts_count}"
with reporter.step("Check object's attribute part-number-marker"):
part_number_marker = 3
obj_parts = s3_client.get_object_attributes(
bucket,
object_key,
["ObjectParts"],
part_number=part_number_marker,
full_output=False,
)
assert obj_parts.get("TotalPartsCount") == parts_count, f"Expected TotalPartsCount is {parts_count}"
assert (
obj_parts.get("PartNumberMarker") == part_number_marker
), f"Expected PartNumberMarker is {part_number_marker}"
assert len(obj_parts.get("Parts")) == 1, f"Expected Parts count is {parts_count}"


@@ -10,33 +10,23 @@ from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content
@allure.title("[Module] Create bucket with object_lock_enabled_for_bucket")
@pytest.fixture(scope="module")
def bucket_w_lock(s3_client: S3ClientWrapper):
return s3_client.create_bucket(object_lock_enabled_for_bucket=True)
@allure.title("[Module] Create bucket without object_lock_enabled_for_bucket")
@pytest.fixture(scope="module")
def bucket_no_lock(s3_client: S3ClientWrapper):
return s3_client.create_bucket(object_lock_enabled_for_bucket=False)
@pytest.mark.nightly
@pytest.mark.s3_gate
@pytest.mark.s3_gate_locking
@pytest.mark.parametrize("version_id", [None, "second"])
class TestS3GateLocking:
    @allure.title("Retention period and legal lock on object (version_id={version_id}, s3_client={s3_client})")
-    def test_s3_object_locking(self, s3_client: S3ClientWrapper, bucket_w_lock: str, version_id: str, simple_object_size: ObjectSize):
+    def test_s3_object_locking(self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize):
        file_path = generate_file(simple_object_size.value)
        file_name = s3_helper.object_key_from_file_path(file_path)
        retention_period = 2

+        bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
+
        with reporter.step("Put several versions of object into bucket"):
-            s3_client.put_object(bucket_w_lock, file_path)
+            s3_client.put_object(bucket, file_path)
            file_name_1 = generate_file_with_content(simple_object_size.value, file_path=file_path)
-            version_id_2 = s3_client.put_object(bucket_w_lock, file_name_1)
+            version_id_2 = s3_client.put_object(bucket, file_name_1)
+            s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])

        if version_id:
            version_id = version_id_2
@@ -46,42 +36,45 @@ class TestS3GateLocking:
                "Mode": "COMPLIANCE",
                "RetainUntilDate": date_obj,
            }
-            s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id)
-            s3_helper.assert_object_lock_mode(s3_client, bucket_w_lock, file_name, "COMPLIANCE", date_obj, "OFF")
+            s3_client.put_object_retention(bucket, file_name, retention, version_id)
+            s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF")

        with reporter.step(f"Put legal hold to object {file_name}"):
-            s3_client.put_object_legal_hold(bucket_w_lock, file_name, "ON", version_id)
-            s3_helper.assert_object_lock_mode(s3_client, bucket_w_lock, file_name, "COMPLIANCE", date_obj, "ON")
+            s3_client.put_object_legal_hold(bucket, file_name, "ON", version_id)
+            s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "ON")

        with reporter.step("Fail with deleting object with legal hold and retention period"):
            if version_id:
                with pytest.raises(Exception):
                    # An error occurred (AccessDenied) when calling the DeleteObject operation (reached max retries: 0): Access Denied.
-                    s3_client.delete_object(bucket_w_lock, file_name, version_id)
+                    s3_client.delete_object(bucket, file_name, version_id)

        with reporter.step("Check retention period is no longer set on the uploaded object"):
            time.sleep((retention_period + 1) * 60)
-            s3_helper.assert_object_lock_mode(s3_client, bucket_w_lock, file_name, "COMPLIANCE", date_obj, "ON")
+            s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "ON")

        with reporter.step("Fail with deleting object with legal hold and retention period"):
            if version_id:
                with pytest.raises(Exception):
                    # An error occurred (AccessDenied) when calling the DeleteObject operation (reached max retries: 0): Access Denied.
-                    s3_client.delete_object(bucket_w_lock, file_name, version_id)
+                    s3_client.delete_object(bucket, file_name, version_id)
            else:
-                s3_client.delete_object(bucket_w_lock, file_name, version_id)
+                s3_client.delete_object(bucket, file_name, version_id)
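put_object_retention() maps onto the S3 PutObjectRetention call; a boto3 sketch with the same COMPLIANCE payload the test builds (names and endpoint are placeholders):

from datetime import datetime, timedelta

import boto3

s3 = boto3.client("s3", endpoint_url="https://s3.frostfs.devenv:8080")

# Lock the current version for two minutes; COMPLIANCE mode cannot be shortened or bypassed.
s3.put_object_retention(
    Bucket="demo-bucket",
    Key="demo-key",
    Retention={"Mode": "COMPLIANCE", "RetainUntilDate": datetime.utcnow() + timedelta(minutes=2)},
)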
@allure.title("Impossible to change retention mode COMPLIANCE (version_id={version_id}, s3_client={s3_client})") @allure.title("Impossible to change retention mode COMPLIANCE (version_id={version_id}, s3_client={s3_client})")
def test_s3_mode_compliance(self, s3_client: S3ClientWrapper, bucket_w_lock: str, version_id: str, simple_object_size: ObjectSize): def test_s3_mode_compliance(self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize):
file_path = generate_file(simple_object_size.value) file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path) file_name = s3_helper.object_key_from_file_path(file_path)
retention_period = 2 retention_period = 2
retention_period_1 = 1 retention_period_1 = 1
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
with reporter.step("Put object into bucket"): with reporter.step("Put object into bucket"):
obj_version = s3_client.put_object(bucket_w_lock, file_path) obj_version = s3_client.put_object(bucket, file_path)
if version_id: if version_id:
version_id = obj_version version_id = obj_version
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
with reporter.step(f"Put retention period {retention_period}min to object {file_name}"): with reporter.step(f"Put retention period {retention_period}min to object {file_name}"):
date_obj = datetime.utcnow() + timedelta(minutes=retention_period) date_obj = datetime.utcnow() + timedelta(minutes=retention_period)
@@ -89,8 +82,8 @@ class TestS3GateLocking:
                "Mode": "COMPLIANCE",
                "RetainUntilDate": date_obj,
            }
-            s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id)
-            s3_helper.assert_object_lock_mode(s3_client, bucket_w_lock, file_name, "COMPLIANCE", date_obj, "OFF")
+            s3_client.put_object_retention(bucket, file_name, retention, version_id)
+            s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF")

        with reporter.step(f"Try to change retention period {retention_period_1}min to object {file_name}"):
            date_obj = datetime.utcnow() + timedelta(minutes=retention_period_1)
@@ -99,20 +92,23 @@ class TestS3GateLocking:
                "RetainUntilDate": date_obj,
            }
            with pytest.raises(Exception):
-                s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id)
+                s3_client.put_object_retention(bucket, file_name, retention, version_id)

    @allure.title("Change retention mode GOVERNANCE (version_id={version_id}, s3_client={s3_client})")
-    def test_s3_mode_governance(self, s3_client: S3ClientWrapper, bucket_w_lock: str, version_id: str, simple_object_size: ObjectSize):
+    def test_s3_mode_governance(self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize):
        file_path = generate_file(simple_object_size.value)
        file_name = s3_helper.object_key_from_file_path(file_path)
        retention_period = 3
        retention_period_1 = 2
        retention_period_2 = 5

+        bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
+
        with reporter.step("Put object into bucket"):
-            obj_version = s3_client.put_object(bucket_w_lock, file_path)
+            obj_version = s3_client.put_object(bucket, file_path)
            if version_id:
                version_id = obj_version
+            s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])

        with reporter.step(f"Put retention period {retention_period}min to object {file_name}"):
            date_obj = datetime.utcnow() + timedelta(minutes=retention_period)
@@ -120,8 +116,8 @@ class TestS3GateLocking:
                "Mode": "GOVERNANCE",
                "RetainUntilDate": date_obj,
            }
-            s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id)
-            s3_helper.assert_object_lock_mode(s3_client, bucket_w_lock, file_name, "GOVERNANCE", date_obj, "OFF")
+            s3_client.put_object_retention(bucket, file_name, retention, version_id)
+            s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF")

        with reporter.step(f"Try to change retention period {retention_period_1}min to object {file_name}"):
            date_obj = datetime.utcnow() + timedelta(minutes=retention_period_1)
@@ -130,7 +126,7 @@ class TestS3GateLocking:
                "RetainUntilDate": date_obj,
            }
            with pytest.raises(Exception):
-                s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id)
+                s3_client.put_object_retention(bucket, file_name, retention, version_id)

        with reporter.step(f"Try to change retention period {retention_period_1}min to object {file_name}"):
            date_obj = datetime.utcnow() + timedelta(minutes=retention_period_1)
@@ -139,7 +135,7 @@ class TestS3GateLocking:
                "RetainUntilDate": date_obj,
            }
            with pytest.raises(Exception):
-                s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id)
+                s3_client.put_object_retention(bucket, file_name, retention, version_id)

        with reporter.step(f"Put new retention period {retention_period_2}min to object {file_name}"):
            date_obj = datetime.utcnow() + timedelta(minutes=retention_period_2)
@@ -147,45 +143,51 @@ class TestS3GateLocking:
                "Mode": "GOVERNANCE",
                "RetainUntilDate": date_obj,
            }
-            s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id, True)
-            s3_helper.assert_object_lock_mode(s3_client, bucket_w_lock, file_name, "GOVERNANCE", date_obj, "OFF")
+            s3_client.put_object_retention(bucket, file_name, retention, version_id, True)
+            s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF")

-    @allure.title("[NEGATIVE] Lock object in bucket with disabled locking (version_id={version_id}, s3_client={s3_client})")
-    def test_s3_legal_hold(self, s3_client: S3ClientWrapper, bucket_no_lock: str, version_id: str, simple_object_size: ObjectSize):
+    @allure.title(
+        "[NEGATIVE] Lock object in bucket with disabled locking (version_id={version_id}, s3_client={s3_client})"
+    )
+    def test_s3_legal_hold(self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize):
        file_path = generate_file(simple_object_size.value)
        file_name = s3_helper.object_key_from_file_path(file_path)

+        bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=False)
+
        with reporter.step("Put object into bucket"):
-            obj_version = s3_client.put_object(bucket_no_lock, file_path)
+            obj_version = s3_client.put_object(bucket, file_path)
            if version_id:
                version_id = obj_version
+            s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])

        with reporter.step(f"Put legal hold to object {file_name}"):
            with pytest.raises(Exception):
-                s3_client.put_object_legal_hold(bucket_no_lock, file_name, "ON", version_id)
+                s3_client.put_object_legal_hold(bucket, file_name, "ON", version_id)
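Shortening a GOVERNANCE retention only succeeds with the bypass flag set; the trailing True in put_object_retention(..., version_id, True) above appears to map to it. Continuing the earlier boto3 sketch (same placeholder names):

s3.put_object_retention(
    Bucket="demo-bucket",
    Key="demo-key",
    Retention={"Mode": "GOVERNANCE", "RetainUntilDate": datetime.utcnow() + timedelta(minutes=5)},
    BypassGovernanceRetention=True,
)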
@pytest.mark.nightly
@pytest.mark.s3_gate
class TestS3GateLockingBucket:
    @allure.title("Bucket Lock (s3_client={s3_client})")
-    def test_s3_bucket_lock(self, s3_client: S3ClientWrapper, bucket_w_lock: str, simple_object_size: ObjectSize):
+    def test_s3_bucket_lock(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize):
        file_path = generate_file(simple_object_size.value)
        file_name = s3_helper.object_key_from_file_path(file_path)
        configuration = {"Rule": {"DefaultRetention": {"Mode": "COMPLIANCE", "Days": 1}}}

+        bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
+
        with reporter.step("PutObjectLockConfiguration with ObjectLockEnabled=False"):
-            s3_client.put_object_lock_configuration(bucket_w_lock, configuration)
+            s3_client.put_object_lock_configuration(bucket, configuration)

        with reporter.step("PutObjectLockConfiguration with ObjectLockEnabled=True"):
            configuration["ObjectLockEnabled"] = "Enabled"
-            s3_client.put_object_lock_configuration(bucket_w_lock, configuration)
+            s3_client.put_object_lock_configuration(bucket, configuration)

        with reporter.step("GetObjectLockConfiguration"):
-            config = s3_client.get_object_lock_configuration(bucket_w_lock)
+            config = s3_client.get_object_lock_configuration(bucket)
            configuration["Rule"]["DefaultRetention"]["Years"] = 0
            assert config == configuration, f"Configurations must be equal {configuration}"

        with reporter.step("Put object into bucket"):
-            s3_client.put_object(bucket_w_lock, file_path)
-            s3_helper.assert_object_lock_mode(s3_client, bucket_w_lock, file_name, "COMPLIANCE", None, "OFF", 1)
+            s3_client.put_object(bucket, file_path)
+            s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", None, "OFF", 1)
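The configuration asserted above corresponds to this PutObjectLockConfiguration payload in boto3 terms (a sketch with placeholder names; the echoed "Years": 0 is gateway behaviour the test observes, not part of the request):

import boto3

s3 = boto3.client("s3", endpoint_url="https://s3.frostfs.devenv:8080")

lock_config = {
    "ObjectLockEnabled": "Enabled",
    "Rule": {"DefaultRetention": {"Mode": "COMPLIANCE", "Days": 1}},
}
s3.put_object_lock_configuration(Bucket="demo-bucket", ObjectLockConfiguration=lock_config)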


@@ -2,19 +2,16 @@ import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
-from frostfs_testlib.s3.interfaces import BucketContainerResolver
-from frostfs_testlib.steps.cli.container import list_objects
+from frostfs_testlib.steps.cli.container import list_objects, search_container_by_name
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
-from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash, split_file

PART_SIZE = 5 * 1024 * 1024

-@pytest.mark.nightly
@pytest.mark.s3_gate
@pytest.mark.s3_gate_multipart
class TestS3GateMultipart(ClusterTestBase):
@@ -23,12 +20,7 @@ class TestS3GateMultipart(ClusterTestBase):
    @allure.title("Object Multipart API (s3_client={s3_client}, bucket versioning = {versioning_status})")
    @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED, VersioningStatus.UNDEFINED], indirect=True)
    def test_s3_object_multipart(
-        self,
-        s3_client: S3ClientWrapper,
-        bucket: str,
-        default_wallet: WalletInfo,
-        versioning_status: str,
-        bucket_container_resolver: BucketContainerResolver,
+        self, s3_client: S3ClientWrapper, bucket: str, default_wallet: WalletInfo, versioning_status: str
    ):
        parts_count = 5
        file_name_large = generate_file(PART_SIZE * parts_count)  # 5Mb - min part

@@ -38,7 +30,7 @@ class TestS3GateMultipart(ClusterTestBase):
        with reporter.step(f"Get related container_id for bucket"):
            for cluster_node in self.cluster.cluster_nodes:
-                container_id = bucket_container_resolver.resolve(cluster_node, bucket)
+                container_id = search_container_by_name(bucket, cluster_node)
                if container_id:
                    break
@@ -54,18 +46,15 @@ class TestS3GateMultipart(ClusterTestBase):
            for part_id, file_path in enumerate(part_files[1:], start=2):
                etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
                parts.append((part_id, etag))

-        with reporter.step("Check all parts are visible in bucket"):
            got_parts = s3_client.list_parts(bucket, object_key, upload_id)
-            assert len(got_parts) == len(part_files), f"Expected {parts_count} parts, got\n{got_parts}"
-
-        with reporter.step("Complete multipart upload"):
            response = s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
            version_id = None
            if versioning_status == VersioningStatus.ENABLED:
                version_id = response["VersionId"]
+            assert len(got_parts) == len(part_files), f"Expected {parts_count} parts, got\n{got_parts}"

-        with reporter.step("There should be no multipart uploads"):
+        with reporter.step("Check upload list is empty"):
            uploads = s3_client.list_multipart_uploads(bucket)
            assert not uploads, f"Expected there is no uploads in bucket {bucket}"

@@ -73,14 +62,18 @@ class TestS3GateMultipart(ClusterTestBase):
            got_object = s3_client.get_object(bucket, object_key)
            assert get_file_hash(got_object) == get_file_hash(file_name_large)

-        with reporter.step("Delete the object"):
-            s3_client.delete_object(bucket, object_key, version_id)
+        if version_id:
+            with reporter.step("Delete the object version"):
+                s3_client.delete_object(bucket, object_key, version_id)
+        else:
+            with reporter.step("Delete the object"):
+                s3_client.delete_object(bucket, object_key)

-        with reporter.step("There should be no objects in bucket"):
+        with reporter.step("List objects in the bucket, expect to be empty"):
            objects_list = s3_client.list_objects(bucket)
            assert not objects_list, f"Expected empty bucket, got {objects_list}"

-        with reporter.step("There should be no objects in container"):
+        with reporter.step("List objects in the container via rpc, expect to be empty"):
            objects = list_objects(default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint)
            assert len(objects) == 0, f"Expected no objects in container, got\n{objects}"
@@ -93,7 +86,6 @@ class TestS3GateMultipart(ClusterTestBase):
        bucket: str,
        simple_object_size: ObjectSize,
        complex_object_size: ObjectSize,
-        bucket_container_resolver: BucketContainerResolver,
    ):
        complex_file = generate_file(complex_object_size.value)
        simple_file = generate_file(simple_object_size.value)

@@ -101,24 +93,24 @@ class TestS3GateMultipart(ClusterTestBase):
        files_count = len(to_upload)
        upload_key = "multipart_abort"

-        with reporter.step("Get related container_id for bucket"):
+        with reporter.step(f"Get related container_id for bucket '{bucket}'"):
            for cluster_node in self.cluster.cluster_nodes:
-                container_id = bucket_container_resolver.resolve(cluster_node, bucket)
+                container_id = search_container_by_name(bucket, cluster_node)
                if container_id:
                    break

        with reporter.step("Create multipart upload"):
            upload_id = s3_client.create_multipart_upload(bucket, upload_key)

-        with reporter.step(f"Upload {files_count} parts to multipart upload"):
+        with reporter.step(f"Upload {files_count} files to multipart upload"):
            for i, file in enumerate(to_upload, 1):
                s3_client.upload_part(bucket, upload_key, upload_id, i, file)

-        with reporter.step(f"There should be {files_count} objects in bucket"):
+        with reporter.step(f"Check that we have {files_count} files in bucket"):
            parts = s3_client.list_parts(bucket, upload_key, upload_id)
            assert len(parts) == files_count, f"Expected {files_count} parts, got\n{parts}"

-        with reporter.step(f"There should be {files_count} objects in container"):
+        with reporter.step(f"Check that we have {files_count} files in container '{container_id}'"):
            objects = list_objects(default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint)
            assert len(objects) == files_count, f"Expected {files_count} objects in container, got\n{objects}"
@@ -127,19 +119,14 @@ class TestS3GateMultipart(ClusterTestBase):
            uploads = s3_client.list_multipart_uploads(bucket)
            assert not uploads, f"Expected no uploads in bucket {bucket}"

-        with reporter.step("There should be no objects in bucket"):
+        with reporter.step("Check that we have no files in bucket since upload was aborted"):
            with pytest.raises(Exception, match=self.NO_SUCH_UPLOAD):
                s3_client.list_parts(bucket, upload_key, upload_id)

-        with reporter.step("There should be no objects in container"):
-            @wait_for_success(120, 10)
-            def check_no_objects():
-                objects = list_objects(default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint)
-                assert len(objects) == 0, f"Expected no objects in container, got\n{objects}"
-
-            check_no_objects()
+        with reporter.step("Check that we have no files in container since upload was aborted"):
+            objects = list_objects(default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint)
+            assert len(objects) == 0, f"Expected no objects in container, got\n{objects}"

    @allure.title("Upload Part Copy (s3_client={s3_client})")
    @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
    def test_s3_multipart_copy(self, s3_client: S3ClientWrapper, bucket: str):

@@ -159,9 +146,7 @@ class TestS3GateMultipart(ClusterTestBase):
        with reporter.step("Create multipart upload object"):
            upload_id = s3_client.create_multipart_upload(bucket, object_key)
            uploads = s3_client.list_multipart_uploads(bucket)
-            assert len(uploads) == 1, f"Expected one upload in bucket {bucket}"
-            assert uploads[0].get("Key") == object_key, f"Expected correct key {object_key} in upload {uploads}"
-            assert uploads[0].get("UploadId") == upload_id, f"Expected correct UploadId {upload_id} in upload {uploads}"
+            assert uploads, f"Expected there are uploads in bucket {bucket}"

        with reporter.step("Upload parts to multipart upload"):
            for part_id, obj_key in enumerate(objs, start=1):

@@ -173,6 +158,6 @@ class TestS3GateMultipart(ClusterTestBase):
            s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
            assert len(got_parts) == len(part_files), f"Expected {parts_count} parts, got\n{got_parts}"

-        with reporter.step("Get whole object from bucket"):
+        with reporter.step("Check we can get whole object from bucket"):
            got_object = s3_client.get_object(bucket, object_key)
            assert get_file_hash(got_object) == get_file_hash(file_name_large)


@@ -1,25 +1,23 @@
import os
-import random
import string
import uuid
from datetime import datetime, timedelta
+from random import choices, sample
from typing import Literal

import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS
-from frostfs_testlib.resources.error_patterns import S3_BUCKET_DOES_NOT_ALLOW_ACL, S3_MALFORMED_XML_REQUEST
-from frostfs_testlib.resources.s3_acl_grants import PRIVATE_GRANTS
+from frostfs_testlib.resources.error_patterns import S3_MALFORMED_XML_REQUEST
from frostfs_testlib.s3 import AwsCliClient, S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils import wallet_utils
-from frostfs_testlib.utils.file_utils import TestFile, concat_files, generate_file, generate_file_with_content, get_file_hash
+from frostfs_testlib.utils.file_utils import concat_files, generate_file, generate_file_with_content, get_file_hash

-@pytest.mark.nightly
@pytest.mark.s3_gate
@pytest.mark.s3_gate_object
class TestS3GateObject:
@@ -30,44 +28,11 @@ class TestS3GateObject:
        public_key = wallet_utils.get_wallet_public_key(second_wallet, DEFAULT_WALLET_PASS)
        yield public_key
@allure.title("Object API (obj_size={object_size}, s3_client={s3_client})")
@pytest.mark.parametrize(
"object_size",
["simple", "complex"],
indirect=True,
)
def test_s3_api_object(
self,
s3_client: S3ClientWrapper,
object_size: ObjectSize,
bucket: str,
):
"""
Test base S3 Object API (Put/Head/List) for simple and complex objects.
"""
with reporter.step("Prepare object to upload"):
test_file = generate_file(object_size.value)
file_name = s3_helper.object_key_from_file_path(test_file)
with reporter.step("Put object to bucket"):
s3_client.put_object(bucket, test_file)
with reporter.step("Head object from bucket"):
s3_client.head_object(bucket, file_name)
with reporter.step("Verify object in list"):
bucket_objects = s3_client.list_objects(bucket)
assert file_name in bucket_objects, f"Expected file {file_name} in objects list {bucket_objects}"
with reporter.step("Check object's attributes"):
for attrs in (["ETag"], ["ObjectSize", "StorageClass"]):
s3_client.get_object_attributes(bucket, file_name, attrs)
@allure.title("Copy object (s3_client={s3_client})") @allure.title("Copy object (s3_client={s3_client})")
def test_s3_copy_object( def test_s3_copy_object(
self, self,
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
two_buckets: list[str], two_buckets: tuple[str, str],
simple_object_size: ObjectSize, simple_object_size: ObjectSize,
): ):
file_path = generate_file(simple_object_size.value) file_path = generate_file(simple_object_size.value)
@ -76,6 +41,9 @@ class TestS3GateObject:
bucket_1, bucket_2 = two_buckets bucket_1, bucket_2 = two_buckets
objects_list = s3_client.list_objects(bucket_1)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with reporter.step("Put object into one bucket"): with reporter.step("Put object into one bucket"):
s3_client.put_object(bucket_1, file_path) s3_client.put_object(bucket_1, file_path)
@ -110,7 +78,7 @@ class TestS3GateObject:
    def test_s3_copy_version_object(
        self,
        s3_client: S3ClientWrapper,
-        two_buckets: list[str],
+        two_buckets: tuple[str, str],
        simple_object_size: ObjectSize,
    ):
        version_1_content = "Version 1"

@ -147,23 +115,20 @@ class TestS3GateObject:

    @allure.title("Copy with acl (s3_client={s3_client})")
    def test_s3_copy_acl(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
-        file_path = generate_file_with_content(simple_object_size.value)
-        file_name = os.path.basename(file_path)
+        version_1_content = "Version 1"
+        file_name_simple = generate_file_with_content(simple_object_size.value, content=version_1_content)
+        obj_key = os.path.basename(file_name_simple)
        s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)

-        with reporter.step("Put object into bucket"):
-            s3_client.put_object(bucket, file_path)
-            s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
+        with reporter.step("Put several versions of object into bucket"):
+            s3_client.put_object(bucket, file_name_simple)
+            s3_helper.check_objects_in_bucket(s3_client, bucket, [obj_key])

-        with reporter.step("[NEGATIVE] Copy object with public-read-write ACL"):
-            with pytest.raises(Exception, match=S3_BUCKET_DOES_NOT_ALLOW_ACL):
-                copy_path = s3_client.copy_object(bucket, file_name, acl="public-read-write")
-
-        with reporter.step("Copy object with private ACL"):
-            copy_path = s3_client.copy_object(bucket, file_name, acl="private")
-            object_grants = s3_client.get_object_acl(bucket, copy_path)
-            s3_helper.verify_acl_permissions(object_grants, PRIVATE_GRANTS)
+        with reporter.step("Copy object and check acl attribute"):
+            copy_obj_path = s3_client.copy_object(bucket, obj_key, acl="public-read-write")
+            obj_acl = s3_client.get_object_acl(bucket, copy_obj_path)
+            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser")

    @allure.title("Copy object with metadata (s3_client={s3_client})")
    def test_s3_copy_metadate(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):

@ -268,7 +233,9 @@ class TestS3GateObject:

        with reporter.step("Put several versions of object into bucket"):
            version_id_1 = s3_client.put_object(bucket, file_name_simple)
-            file_name_1 = generate_file_with_content(simple_object_size.value, file_name_simple, version_2_content)
+            file_name_1 = generate_file_with_content(
+                simple_object_size.value, file_path=file_name_simple, content=version_2_content
+            )
            version_id_2 = s3_client.put_object(bucket, file_name_1)

        with reporter.step("Check bucket shows all versions"):

@ -287,16 +254,16 @@ class TestS3GateObject:

            assert "DeleteMarker" not in delete_obj.keys(), "Delete markers should not be created"

        with reporter.step("Delete second version of object"):
-            delete_obj = s3_client.delete_object(bucket, obj_key, version_id_2)
+            delete_obj = s3_client.delete_object(bucket, obj_key, version_id=version_id_2)
            versions = s3_client.list_objects_versions(bucket)
            obj_versions = {version.get("VersionId") for version in versions if version.get("Key") == obj_key}
            assert not obj_versions, "Expected object not found"
            assert "DeleteMarker" not in delete_obj.keys(), "Delete markers should not be created"

        with reporter.step("Put new object into bucket"):
-            file_name_complex = generate_file(complex_object_size.value)
-            obj_key = os.path.basename(file_name_complex)
-            s3_client.put_object(bucket, file_name_complex)
+            file_name_simple = generate_file(complex_object_size.value)
+            obj_key = os.path.basename(file_name_simple)
+            s3_client.put_object(bucket, file_name_simple)

        with reporter.step("Delete last object"):
            delete_obj = s3_client.delete_object(bucket, obj_key)
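For context, the asserts in this hunk lean on standard S3 versioning semantics: a DELETE with an explicit version id permanently removes that version and returns no DeleteMarker field, while a DELETE without a version id only hides the object behind a new delete marker. A boto3 sketch (endpoint, names, and the version id are placeholders):

import boto3

s3 = boto3.client("s3", endpoint_url="https://s3.frostfs.devenv:8080")  # assumed endpoint

# DELETE of a concrete version erases it permanently - no marker in the response
resp = s3.delete_object(Bucket="test-bucket", Key="obj", VersionId="<version-id>")
assert "DeleteMarker" not in resp

# DELETE without VersionId only hides the object behind a new delete marker
resp = s3.delete_object(Bucket="test-bucket", Key="obj")
assert resp.get("DeleteMarker") is True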
@ -317,11 +284,17 @@ class TestS3GateObject:
with reporter.step("Put several versions of object into bucket"): with reporter.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_name_1) version_id_1 = s3_client.put_object(bucket, file_name_1)
file_name_2 = generate_file_with_content(simple_object_size.value, file_name_1, version_2_content) file_name_2 = generate_file_with_content(
simple_object_size.value, file_path=file_name_1, content=version_2_content
)
version_id_2 = s3_client.put_object(bucket, file_name_2) version_id_2 = s3_client.put_object(bucket, file_name_2)
file_name_3 = generate_file_with_content(simple_object_size.value, file_name_1, version_3_content) file_name_3 = generate_file_with_content(
simple_object_size.value, file_path=file_name_1, content=version_3_content
)
version_id_3 = s3_client.put_object(bucket, file_name_3) version_id_3 = s3_client.put_object(bucket, file_name_3)
file_name_4 = generate_file_with_content(simple_object_size.value, file_name_1, version_4_content) file_name_4 = generate_file_with_content(
simple_object_size.value, file_path=file_name_1, content=version_4_content
)
version_id_4 = s3_client.put_object(bucket, file_name_4) version_id_4 = s3_client.put_object(bucket, file_name_4)
version_ids = {version_id_1, version_id_2, version_id_3, version_id_4} version_ids = {version_id_1, version_id_2, version_id_3, version_id_4}
@ -331,7 +304,7 @@ class TestS3GateObject:
assert obj_versions == version_ids, f"Object should have versions: {version_ids}" assert obj_versions == version_ids, f"Object should have versions: {version_ids}"
with reporter.step("Delete two objects from bucket one by one"): with reporter.step("Delete two objects from bucket one by one"):
version_to_delete_b1 = random.sample([version_id_1, version_id_2, version_id_3, version_id_4], k=2) version_to_delete_b1 = sample([version_id_1, version_id_2, version_id_3, version_id_4], k=2)
version_to_save = list(set(version_ids) - set(version_to_delete_b1)) version_to_save = list(set(version_ids) - set(version_to_delete_b1))
for ver in version_to_delete_b1: for ver in version_to_delete_b1:
s3_client.delete_object(bucket, obj_key, ver) s3_client.delete_object(bucket, obj_key, ver)
@ -351,7 +324,9 @@ class TestS3GateObject:
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED) s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with reporter.step("Put several versions of object into bucket"): with reporter.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_name_simple) version_id_1 = s3_client.put_object(bucket, file_name_simple)
file_name_1 = generate_file_with_content(simple_object_size.value, file_path=file_name_simple, content=version_2_content) file_name_1 = generate_file_with_content(
simple_object_size.value, file_path=file_name_simple, content=version_2_content
)
version_id_2 = s3_client.put_object(bucket, file_name_1) version_id_2 = s3_client.put_object(bucket, file_name_1)
with reporter.step("Get first version of object"): with reporter.step("Get first version of object"):
@ -437,7 +412,9 @@ class TestS3GateObject:
assert get_file_hash(con_file_1) == get_file_hash(file_name_1), "Hashes must be the same" assert get_file_hash(con_file_1) == get_file_hash(file_name_1), "Hashes must be the same"
with reporter.step("Get object"): with reporter.step("Get object"):
object_3_part_1 = s3_client.get_object(bucket, file_name, object_range=[0, int(simple_object_size.value / 3)]) object_3_part_1 = s3_client.get_object(
bucket, file_name, object_range=[0, int(simple_object_size.value / 3)]
)
object_3_part_2 = s3_client.get_object( object_3_part_2 = s3_client.get_object(
bucket, bucket,
file_name, file_name,
@ -551,7 +528,9 @@ class TestS3GateObject:
elif list_type == "v2": elif list_type == "v2":
list_obj = s3_client.list_objects_v2(bucket) list_obj = s3_client.list_objects_v2(bucket)
assert len(list_obj) == 2, "bucket should have 2 objects" assert len(list_obj) == 2, "bucket should have 2 objects"
assert list_obj.sort() == [file_name, file_name_2].sort(), f"bucket should have object key {file_name, file_name_2}" assert (
list_obj.sort() == [file_name, file_name_2].sort()
), f"bucket should have object key {file_name, file_name_2}"
with reporter.step("Delete object"): with reporter.step("Delete object"):
delete_obj = s3_client.delete_object(bucket, file_name) delete_obj = s3_client.delete_object(bucket, file_name)
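One pre-existing wart carried over on both sides of the hunk above: list.sort() sorts in place and returns None, so the assertion compares None == None and can never fail. If this check is meant to bite, something along these lines is needed (sketch):

# sorted() returns a new list, so the comparison is meaningful
assert sorted(list_obj) == sorted([file_name, file_name_2]), \
    f"bucket should have object keys {file_name, file_name_2}"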
@ -659,34 +638,71 @@ class TestS3GateObject:
        simple_object_size: ObjectSize,
        second_wallet_public_key: str,
    ):
-        file_path = generate_file(complex_object_size.value)
-        file_name = s3_helper.object_key_from_file_path(file_path)
-
-        s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus[bucket_versioning])
+        file_path_1 = generate_file(complex_object_size.value)
+        file_name = s3_helper.object_key_from_file_path(file_path_1)
+        if bucket_versioning == "ENABLED":
+            status = VersioningStatus.ENABLED
+        elif bucket_versioning == "SUSPENDED":
+            status = VersioningStatus.SUSPENDED
+        s3_helper.set_bucket_versioning(s3_client, bucket, status)

        with reporter.step("Put object with acl private"):
-            s3_client.put_object(bucket, file_path, acl="private")
-            object_grants = s3_client.get_object_acl(bucket, file_name)
-            s3_helper.verify_acl_permissions(object_grants, PRIVATE_GRANTS)
-            object = s3_client.get_object(bucket, file_name)
-            assert get_file_hash(file_path) == get_file_hash(object), "Hashes must be the same"
+            s3_client.put_object(bucket, file_path_1, acl="private")
+            obj_acl = s3_client.get_object_acl(bucket, file_name)
+            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser")
+            object_1 = s3_client.get_object(bucket, file_name)
+            assert get_file_hash(file_path_1) == get_file_hash(object_1), "Hashes must be the same"

-        with reporter.step("[NEGATIVE] Put object with acl public-read"):
-            generate_file_with_content(simple_object_size.value, file_path)
-            with pytest.raises(Exception, match=S3_BUCKET_DOES_NOT_ALLOW_ACL):
-                s3_client.put_object(bucket, file_path, acl="public-read")
+        with reporter.step("Put object with acl public-read"):
+            file_path_2 = generate_file_with_content(simple_object_size.value, file_path=file_path_1)
+            s3_client.put_object(bucket, file_path_2, acl="public-read")
+            obj_acl = s3_client.get_object_acl(bucket, file_name)
+            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
+            object_2 = s3_client.get_object(bucket, file_name)
+            assert get_file_hash(file_path_2) == get_file_hash(object_2), "Hashes must be the same"

-        with reporter.step("[NEGATIVE] Put object with acl public-read-write"):
-            generate_file_with_content(simple_object_size.value, file_path)
-            with pytest.raises(Exception, match=S3_BUCKET_DOES_NOT_ALLOW_ACL):
-                s3_client.put_object(bucket, file_path, acl="public-read-write")
+        with reporter.step("Put object with acl public-read-write"):
+            file_path_3 = generate_file_with_content(simple_object_size.value, file_path=file_path_1)
+            s3_client.put_object(bucket, file_path_3, acl="public-read-write")
+            obj_acl = s3_client.get_object_acl(bucket, file_name)
+            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
+            object_3 = s3_client.get_object(bucket, file_name)
+            assert get_file_hash(file_path_3) == get_file_hash(object_3), "Hashes must be the same"

+        with reporter.step("Put object with acl authenticated-read"):
+            file_path_4 = generate_file_with_content(simple_object_size.value, file_path=file_path_1)
+            s3_client.put_object(bucket, file_path_4, acl="authenticated-read")
+            obj_acl = s3_client.get_object_acl(bucket, file_name)
+            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
+            object_4 = s3_client.get_object(bucket, file_name)
+            assert get_file_hash(file_path_4) == get_file_hash(object_4), "Hashes must be the same"

+        file_path_5 = generate_file(complex_object_size.value)
+        file_name_5 = s3_helper.object_key_from_file_path(file_path_5)

-        with reporter.step("[NEGATIVE] Put object with --grant-full-control id=mycanonicaluserid"):
-            with pytest.raises(Exception, match=S3_BUCKET_DOES_NOT_ALLOW_ACL):
-                s3_client.put_object(bucket, file_path, grant_full_control=f"id={second_wallet_public_key}")
+        with reporter.step("Put object with --grant-full-control id=mycanonicaluserid"):
+            generate_file_with_content(simple_object_size.value, file_path=file_path_5)
+            s3_client.put_object(
+                bucket,
+                file_path_5,
+                grant_full_control=f"id={second_wallet_public_key}",
+            )
+            obj_acl = s3_client.get_object_acl(bucket, file_name_5)
+            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser")
+            object_5 = s3_client.get_object(bucket, file_name_5)
+            assert get_file_hash(file_path_5) == get_file_hash(object_5), "Hashes must be the same"

-        with reporter.step("[NEGATIVE] Put object with --grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers"):
-            with pytest.raises(Exception, match=S3_BUCKET_DOES_NOT_ALLOW_ACL):
-                s3_client.put_object(bucket, file_path, grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers")
+        with reporter.step("Put object with --grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers"):
+            generate_file_with_content(simple_object_size.value, file_path=file_path_5)
+            s3_client.put_object(
+                bucket,
+                file_path_5,
+                grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers",
+            )
+            obj_acl = s3_client.get_object_acl(bucket, file_name_5)
+            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
+            object_6 = s3_client.get_object(bucket, file_name_5)
+            assert get_file_hash(file_path_5) == get_file_hash(object_6), "Hashes must be the same"
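For reference, the wrapper's acl=/grant_* keywords correspond to boto3's PutObject parameters. A minimal raw-call sketch (endpoint, bucket, key, and the canonical user id are placeholders):

import boto3

s3 = boto3.client("s3", endpoint_url="https://s3.frostfs.devenv:8080")  # assumed endpoint

# Canned ACL applied at write time
s3.put_object(Bucket="test-bucket", Key="obj", Body=b"data", ACL="authenticated-read")

# Explicit grants accept id=, uri= or emailAddress= grantees
s3.put_object(
    Bucket="test-bucket",
    Key="obj",
    Body=b"data",
    GrantFullControl="id=<canonical-user-id>",
    GrantRead="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)

# The resulting grants are what assert_s3_acl/verify_acl_permissions inspect
grants = s3.get_object_acl(Bucket="test-bucket", Key="obj")["Grants"]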
@allure.title("Put object with lock-mode (s3_client={s3_client})") @allure.title("Put object with lock-mode (s3_client={s3_client})")
def test_s3_put_object_lock_mode( def test_s3_put_object_lock_mode(
@ -712,7 +728,9 @@ class TestS3GateObject:
) )
s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF") s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF")
with reporter.step("Put new version of object with [--object-lock-mode COMPLIANCE] и [--object-lock-retain-until-date +3days]"): with reporter.step(
"Put new version of object with [--object-lock-mode COMPLIANCE] и [--object-lock-retain-until-date +3days]"
):
date_obj = datetime.utcnow() + timedelta(days=2) date_obj = datetime.utcnow() + timedelta(days=2)
generate_file_with_content(simple_object_size.value, file_path=file_path_1) generate_file_with_content(simple_object_size.value, file_path=file_path_1)
s3_client.put_object( s3_client.put_object(
@ -723,7 +741,9 @@ class TestS3GateObject:
) )
s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF") s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF")
with reporter.step("Put new version of object with [--object-lock-mode COMPLIANCE] и [--object-lock-retain-until-date +2days]"): with reporter.step(
"Put new version of object with [--object-lock-mode COMPLIANCE] и [--object-lock-retain-until-date +2days]"
):
date_obj = datetime.utcnow() + timedelta(days=3) date_obj = datetime.utcnow() + timedelta(days=3)
generate_file_with_content(simple_object_size.value, file_path=file_path_1) generate_file_with_content(simple_object_size.value, file_path=file_path_1)
s3_client.put_object( s3_client.put_object(
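The lock-mode steps map onto the ObjectLock* parameters of PutObject and require a bucket created with object lock support. Sketch (placeholder names; endpoint assumed):

from datetime import datetime, timedelta, timezone

import boto3

s3 = boto3.client("s3", endpoint_url="https://s3.frostfs.devenv:8080")  # assumed endpoint

# Object lock can only be used on a bucket created with it enabled
s3.create_bucket(Bucket="locked-bucket", ObjectLockEnabledForBucket=True)

s3.put_object(
    Bucket="locked-bucket",
    Key="obj",
    Body=b"data",
    ObjectLockMode="COMPLIANCE",  # or "GOVERNANCE"
    ObjectLockRetainUntilDate=datetime.now(timezone.utc) + timedelta(days=3),
    ObjectLockLegalHoldStatus="OFF",
)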
@ -757,68 +777,6 @@ class TestS3GateObject:
                object_lock_retain_until_date=date_obj,
            )

-    @allure.title("Delete object & delete objects (s3_client={s3_client})")
-    def test_s3_api_delete(
-        self,
-        s3_client: S3ClientWrapper,
-        two_buckets: list[str],
-        simple_object_size: ObjectSize,
-        complex_object_size: ObjectSize,
-    ):
-        """
-        Check delete_object and delete_objects S3 API operation. From first bucket some objects deleted one by one.
-        From second bucket some objects deleted all at once.
-        """
-        max_obj_count = 20
-        max_delete_objects = 17
-        put_objects = []
-        file_paths = []
-        obj_sizes = [simple_object_size, complex_object_size]
-
-        bucket_1, bucket_2 = two_buckets
-
-        with reporter.step(f"Generate {max_obj_count} files"):
-            for _ in range(max_obj_count):
-                test_file = generate_file(random.choice(obj_sizes).value)
-                file_paths.append(test_file)
-                put_objects.append(s3_helper.object_key_from_file_path(test_file.path))
-
-        for i, bucket in enumerate([bucket_1, bucket_2], 1):
-            with reporter.step(f"Put {max_obj_count} objects into bucket_{i}"):
-                for file_path in file_paths:
-                    s3_client.put_object(bucket, file_path)
-
-            with reporter.step(f"Check all objects put in bucket_{i} successfully"):
-                bucket_objects = s3_client.list_objects_v2(bucket)
-                assert set(put_objects) == set(bucket_objects), f"Expected all objects {put_objects} in objects list {bucket_objects}"
-
-        with reporter.step("Delete some objects from bucket_1 one by one"):
-            objects_to_delete_b1 = random.sample(put_objects, k=max_delete_objects)
-            for obj in objects_to_delete_b1:
-                s3_client.delete_object(bucket_1, obj)
-
-        with reporter.step("Check deleted objects are not visible in bucket bucket_1"):
-            bucket_objects = s3_client.list_objects_v2(bucket_1)
-            assert set(put_objects).difference(set(objects_to_delete_b1)) == set(
-                bucket_objects
-            ), f"Expected all objects {put_objects} in objects list {bucket_objects}"
-            for object_key in objects_to_delete_b1:
-                with pytest.raises(Exception, match="The specified key does not exist"):
-                    s3_client.get_object(bucket_1, object_key)
-
-        with reporter.step("Delete some objects from bucket_2 at once"):
-            objects_to_delete_b2 = random.sample(put_objects, k=max_delete_objects)
-            s3_client.delete_objects(bucket_2, objects_to_delete_b2)
-
-        with reporter.step("Check deleted objects are not visible in bucket bucket_2"):
-            objects_list = s3_client.list_objects_v2(bucket_2)
-            assert set(put_objects).difference(set(objects_to_delete_b2)) == set(
-                objects_list
-            ), f"Expected all objects {put_objects} in objects list {bucket_objects}"
-            for object_key in objects_to_delete_b2:
-                with pytest.raises(Exception, match="The specified key does not exist"):
-                    s3_client.get_object(bucket_2, object_key)
@allure.title("Sync directory (sync_type={sync_type}, s3_client={s3_client})") @allure.title("Sync directory (sync_type={sync_type}, s3_client={s3_client})")
@pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True) @pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True)
@pytest.mark.parametrize("sync_type", ["sync", "cp"]) @pytest.mark.parametrize("sync_type", ["sync", "cp"])
@ -829,19 +787,29 @@ class TestS3GateObject:
bucket: str, bucket: str,
simple_object_size: ObjectSize, simple_object_size: ObjectSize,
): ):
test_file_1 = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_1")) file_path_1 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_1")
test_file_2 = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_2")) file_path_2 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_2")
object_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"} object_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
key_to_path = {"test_file_1": test_file_1.path, "test_file_2": test_file_2.path} key_to_path = {"test_file_1": file_path_1, "test_file_2": file_path_2}
generate_file_with_content(simple_object_size.value, test_file_1) generate_file_with_content(simple_object_size.value, file_path=file_path_1)
generate_file_with_content(simple_object_size.value, test_file_2) generate_file_with_content(simple_object_size.value, file_path=file_path_2)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED) s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
# TODO: return ACL, when https://github.com/nspcc-dev/neofs-s3-gw/issues/685 will be closed
if sync_type == "sync": if sync_type == "sync":
s3_client.sync(bucket, os.path.dirname(test_file_1), metadata=object_metadata) s3_client.sync(
bucket=bucket,
dir_path=os.path.dirname(file_path_1),
# acl="public-read-write",
metadata=object_metadata,
)
elif sync_type == "cp": elif sync_type == "cp":
s3_client.cp(bucket, os.path.dirname(test_file_1), metadata=object_metadata) s3_client.cp(
bucket=bucket,
dir_path=os.path.dirname(file_path_1),
# acl="public-read-write",
metadata=object_metadata,
)
with reporter.step("Check objects are synced"): with reporter.step("Check objects are synced"):
objects = s3_client.list_objects(bucket) objects = s3_client.list_objects(bucket)
@ -850,40 +818,47 @@ class TestS3GateObject:
with reporter.step("Check these are the same objects"): with reporter.step("Check these are the same objects"):
for obj_key in objects: for obj_key in objects:
got_object = s3_client.get_object(bucket, obj_key) got_object = s3_client.get_object(bucket, obj_key)
assert get_file_hash(got_object) == get_file_hash(key_to_path.get(obj_key)), "Expected hashes are the same" assert get_file_hash(got_object) == get_file_hash(
key_to_path.get(obj_key)
), "Expected hashes are the same"
obj_head = s3_client.head_object(bucket, obj_key) obj_head = s3_client.head_object(bucket, obj_key)
assert obj_head.get("Metadata") == object_metadata, f"Metadata of object is {object_metadata}" assert obj_head.get("Metadata") == object_metadata, f"Metadata of object is {object_metadata}"
object_grants = s3_client.get_object_acl(bucket, obj_key) # Uncomment after https://github.com/nspcc-dev/neofs-s3-gw/issues/685 is solved
s3_helper.verify_acl_permissions(object_grants, PRIVATE_GRANTS) # obj_acl = s3_client.get_object_acl(bucket, obj_key)
# s3_helper.assert_s3_acl(acl_grants = obj_acl, permitted_users = "AllUsers")
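Since this test pins s3_client to AwsCliClient, sync/cp here are thin wrappers over the AWS CLI. A roughly equivalent standalone invocation, for orientation only (endpoint, bucket name, and metadata values are placeholders; the wrapper's exact flags may differ):

import subprocess

# "aws s3 sync" walks the directory tree; "aws s3 cp --recursive" copies file by file
subprocess.run(
    [
        "aws", "--endpoint-url", "https://s3.frostfs.devenv:8080",  # assumed endpoint
        "s3", "sync", "./test_sync", "s3://test-bucket",
        "--metadata", "somekey=somevalue",
    ],
    check=True,
)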
@allure.title("Put 10 nested level object (s3_client={s3_client})") @allure.title("Put 10 nested level object (s3_client={s3_client})")
def test_s3_put_10_folder( def test_s3_put_10_folder(
self, self,
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
bucket: str, bucket: str,
temp_directory,
simple_object_size: ObjectSize, simple_object_size: ObjectSize,
): ):
key_characters_sample = string.ascii_letters + string.digits + "._-" path = "/".join(["".join(choices(string.ascii_letters, k=3)) for _ in range(10)])
file_path_1 = os.path.join(temp_directory, path, "test_file_1")
generate_file_with_content(simple_object_size.value, file_path=file_path_1)
file_name = s3_helper.object_key_from_file_path(file_path_1)
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with reporter.step("Put object"): with reporter.step("Put object"):
test_file = generate_file(simple_object_size.value) s3_client.put_object(bucket, file_path_1)
obj_key = "/" + "/".join(["".join(random.choices(key_characters_sample, k=5)) for _ in range(10)]) + "/test_file_1" s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
s3_client.put_object(bucket, test_file, obj_key)
with reporter.step("Check object can be downloaded"):
s3_client.get_object(bucket, obj_key)
with reporter.step("Check object listing"):
s3_helper.check_objects_in_bucket(s3_client, bucket, [obj_key])
@allure.title("Delete non-existing object from empty bucket (s3_client={s3_client})") @allure.title("Delete non-existing object from empty bucket (s3_client={s3_client})")
def test_s3_delete_non_existing_object(self, s3_client: S3ClientWrapper, bucket: str): def test_s3_delete_non_existing_object(self, s3_client: S3ClientWrapper, bucket: str):
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED) s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
objects_list = s3_client.list_objects_versions(bucket)
with reporter.step("Check that bucket is empty"):
assert not objects_list, f"Expected empty bucket, got {objects_list}"
obj_key = "fake_object_key" obj_key = "fake_object_key"
with reporter.step("Delete non-existing object"): with reporter.step("Delete non-existing object"):
delete_obj = s3_client.delete_object(bucket, obj_key) delete_obj = s3_client.delete_object(bucket, obj_key)
# there should be no objects or delete markers in the bucket
assert "DeleteMarker" not in delete_obj.keys(), "Delete markers should not be created" assert "DeleteMarker" not in delete_obj.keys(), "Delete markers should not be created"
objects_list = s3_client.list_objects_versions(bucket) objects_list = s3_client.list_objects_versions(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}" assert not objects_list, f"Expected empty bucket, got {objects_list}"
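Worth a second look: against AWS S3 proper, a plain DELETE on a versioning-enabled bucket records a delete marker even when the key never existed, so the assertions above encode gateway-specific behavior rather than AWS parity. A probe (placeholder names, assumed endpoint):

import boto3

s3 = boto3.client("s3", endpoint_url="https://s3.frostfs.devenv:8080")  # assumed endpoint

s3.delete_object(Bucket="test-bucket", Key="fake_object_key")
markers = s3.list_object_versions(Bucket="test-bucket").get("DeleteMarkers", [])
# Against AWS this list gains an entry; the test above expects it to stay empty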
@ -891,6 +866,9 @@ class TestS3GateObject:
@allure.title("Delete the same object twice (s3_client={s3_client})") @allure.title("Delete the same object twice (s3_client={s3_client})")
def test_s3_delete_twice(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize): def test_s3_delete_twice(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED) s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
objects_list = s3_client.list_objects(bucket)
with reporter.step("Check that bucket is empty"):
assert not objects_list, f"Expected empty bucket, got {objects_list}"
file_path = generate_file(simple_object_size.value) file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path) file_name = s3_helper.object_key_from_file_path(file_path)
@ -903,7 +881,7 @@ class TestS3GateObject:
versions = s3_client.list_objects_versions(bucket) versions = s3_client.list_objects_versions(bucket)
obj_versions = {version.get("VersionId") for version in versions if version.get("Key") == file_name} obj_versions = {version.get("VersionId") for version in versions if version.get("Key") == file_name}
assert obj_versions, f"Object versions were not found {versions}" assert obj_versions, f"Object versions were not found {objects_list}"
assert "DeleteMarker" in delete_object.keys(), "Delete markers not found" assert "DeleteMarker" in delete_object.keys(), "Delete markers not found"
with reporter.step("Delete the object from the bucket again"): with reporter.step("Delete the object from the bucket again"):

View file

@ -1,11 +1,10 @@
-import json
+import os

import allure
import pytest
-from botocore.exceptions import ClientError
from frostfs_testlib import reporter
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
-from frostfs_testlib.s3.interfaces import BucketContainerResolver
+from frostfs_testlib.steps.cli.container import search_container_by_name
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.steps.storage_policy import get_simple_object_copies
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize

@ -14,20 +13,13 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase

from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import generate_file

-from ....resources.common import S3_POLICY_FILE_LOCATION
-
-@pytest.mark.nightly
@pytest.mark.s3_gate
-@pytest.mark.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], indirect=True)
+@pytest.mark.parametrize("s3_policy", ["pytest_tests/resources/files/policy.json"], indirect=True)
class TestS3GatePolicy(ClusterTestBase):
    @allure.title("Bucket creation with retention policy applied (s3_client={s3_client})")
    def test_s3_bucket_location(
-        self,
-        default_wallet: WalletInfo,
-        s3_client: S3ClientWrapper,
-        simple_object_size: ObjectSize,
-        bucket_container_resolver: BucketContainerResolver,
+        self, default_wallet: WalletInfo, s3_client: S3ClientWrapper, simple_object_size: ObjectSize
    ):
        file_path_1 = generate_file(simple_object_size.value)
        file_name_1 = s3_helper.object_key_from_file_path(file_path_1)
@ -40,7 +32,9 @@ class TestS3GatePolicy(ClusterTestBase):
            bucket_2 = s3_client.create_bucket(location_constraint="rep-3")
            s3_helper.set_bucket_versioning(s3_client, bucket_2, VersioningStatus.ENABLED)
            list_buckets = s3_client.list_buckets()
-            assert bucket_1 in list_buckets and bucket_2 in list_buckets, f"Expected two buckets {bucket_1, bucket_2}, got {list_buckets}"
+            assert (
+                bucket_1 in list_buckets and bucket_2 in list_buckets
+            ), f"Expected two buckets {bucket_1, bucket_2}, got {list_buckets}"

        with reporter.step("Check head buckets"):
            with expect_not_raises():

@ -61,7 +55,7 @@ class TestS3GatePolicy(ClusterTestBase):

        with reporter.step("Check object policy"):
            for cluster_node in self.cluster.cluster_nodes:
-                cid_1 = bucket_container_resolver.resolve(cluster_node, bucket_1)
+                cid_1 = search_container_by_name(name=bucket_1, node=cluster_node)
                if cid_1:
                    break
            copies_1 = get_simple_object_copies(

@ -73,7 +67,7 @@ class TestS3GatePolicy(ClusterTestBase):

            )
            assert copies_1 == 1
            for cluster_node in self.cluster.cluster_nodes:
-                cid_2 = bucket_container_resolver.resolve(cluster_node, bucket_2)
+                cid_2 = search_container_by_name(name=bucket_2, node=cluster_node)
                if cid_2:
                    break
            copies_2 = get_simple_object_copies(
@ -92,44 +86,39 @@ class TestS3GatePolicy(ClusterTestBase):
            s3_client.create_bucket(location_constraint="UNEXISTING LOCATION CONSTRAINT")

    @allure.title("Bucket policy (s3_client={s3_client})")
-    def test_s3_bucket_policy(self, s3_client: S3ClientWrapper, bucket: str):
-        with reporter.step("Create bucket"):
+    def test_s3_bucket_policy(self, s3_client: S3ClientWrapper):
+        with reporter.step("Create bucket with default policy"):
+            bucket = s3_client.create_bucket()
            s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)

        with reporter.step("GetBucketPolicy"):
-            with pytest.raises((RuntimeError, ClientError)):
-                s3_client.get_bucket_policy(bucket)
+            s3_client.get_bucket_policy(bucket)

        with reporter.step("Put new policy"):
+            custom_policy = f"file://{os.getcwd()}/pytest_tests/resources/files/bucket_policy.json"
            custom_policy = {
-                "Version": "2012-10-17",
+                "Version": "2008-10-17",
                "Id": "aaaa-bbbb-cccc-dddd",
                "Statement": [
                    {
                        "Sid": "AddPerm",
                        "Effect": "Allow",
-                        "Principal": "*",
+                        "Principal": {"AWS": "*"},
                        "Action": ["s3:GetObject"],
                        "Resource": [f"arn:aws:s3:::{bucket}/*"],
                    }
                ],
            }

            s3_client.put_bucket_policy(bucket, custom_policy)

        with reporter.step("GetBucketPolicy"):
-            returned_policy = json.loads(s3_client.get_bucket_policy(bucket))
-            assert returned_policy == custom_policy, "Wrong policy was received"
-
-        with reporter.step("Delete the policy"):
-            s3_client.delete_bucket_policy(bucket)
-
-        with reporter.step("GetBucketPolicy"):
-            with pytest.raises((RuntimeError, ClientError)):
-                s3_client.get_bucket_policy(bucket)
+            policy_1 = s3_client.get_bucket_policy(bucket)
+            print(policy_1)

    @allure.title("Bucket CORS (s3_client={s3_client})")
-    def test_s3_cors(self, s3_client: S3ClientWrapper, bucket: str):
+    def test_s3_cors(self, s3_client: S3ClientWrapper):
        with reporter.step("Create bucket without cors"):
+            bucket = s3_client.create_bucket()
            s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)

        with pytest.raises(Exception):
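A detail this hunk touches twice: boto3's put_bucket_policy takes the policy as a JSON string (hence json.dumps on the left-hand side), get_bucket_policy returns {"Policy": "<json string>"}, and "Principal": "*" is shorthand for "Principal": {"AWS": "*"} in the policy language. Sketch with placeholder names:

import json

import boto3

s3 = boto3.client("s3", endpoint_url="https://s3.frostfs.devenv:8080")  # assumed endpoint

policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "AddPerm",
            "Effect": "Allow",
            "Principal": "*",
            "Action": ["s3:GetObject"],
            "Resource": ["arn:aws:s3:::test-bucket/*"],
        }
    ],
}
s3.put_bucket_policy(Bucket="test-bucket", Policy=json.dumps(policy))
returned = json.loads(s3.get_bucket_policy(Bucket="test-bucket")["Policy"])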

View file

@ -11,7 +11,6 @@ from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import generate_file

-@pytest.mark.nightly
@pytest.mark.s3_gate
@pytest.mark.s3_gate_tagging
class TestS3GateTagging:

View file

@ -1,15 +1,12 @@
-import os
-
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
-from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content, get_file_content
+from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content

-@pytest.mark.nightly
@pytest.mark.s3_gate
@pytest.mark.s3_gate_versioning
class TestS3GateVersioning:
@ -19,67 +16,6 @@ class TestS3GateVersioning:
        with pytest.raises(Exception):
            s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.SUSPENDED)

-    @allure.title("Object versioning (s3_client={s3_client})")
-    def test_s3_api_versioning(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
-        """
-        Test checks basic versioning functionality for S3 bucket.
-        """
-        version_1_content = "Version 1"
-        version_2_content = "Version 2"
-        file_name_simple = generate_file_with_content(simple_object_size.value, content=version_1_content)
-        obj_key = os.path.basename(file_name_simple)
-        s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
-
-        with reporter.step("Put several versions of object into bucket"):
-            version_id_1 = s3_client.put_object(bucket, file_name_simple)
-            generate_file_with_content(simple_object_size.value, file_path=file_name_simple, content=version_2_content)
-            version_id_2 = s3_client.put_object(bucket, file_name_simple)
-
-        with reporter.step("Check bucket shows all versions"):
-            versions = s3_client.list_objects_versions(bucket)
-            obj_versions = {version.get("VersionId") for version in versions if version.get("Key") == obj_key}
-            assert obj_versions == {
-                version_id_1,
-                version_id_2,
-            }, f"Expected object has versions: {version_id_1, version_id_2}"
-
-        with reporter.step("Show information about particular version"):
-            for version_id in (version_id_1, version_id_2):
-                response = s3_client.head_object(bucket, obj_key, version_id=version_id)
-                assert "LastModified" in response, "Expected LastModified field"
-                assert "ETag" in response, "Expected ETag field"
-                assert response.get("VersionId") == version_id, f"Expected VersionId is {version_id}"
-                assert response.get("ContentLength") != 0, "Expected ContentLength is not zero"
-
-        with reporter.step("Check object's attributes"):
-            for version_id in (version_id_1, version_id_2):
-                got_attrs = s3_client.get_object_attributes(bucket, obj_key, ["ETag"], version_id=version_id)
-                if got_attrs:
-                    assert got_attrs.get("VersionId") == version_id, f"Expected VersionId is {version_id}"
-
-        with reporter.step("Delete object and check it was deleted"):
-            response = s3_client.delete_object(bucket, obj_key)
-            version_id_delete = response.get("VersionId")
-            with pytest.raises(Exception, match=r".*Not Found.*"):
-                s3_client.head_object(bucket, obj_key)
-
-        with reporter.step("Get content for all versions and check it is correct"):
-            for version, content in (
-                (version_id_2, version_2_content),
-                (version_id_1, version_1_content),
-            ):
-                file_name = s3_client.get_object(bucket, obj_key, version_id=version)
-                got_content = get_file_content(file_name)
-                assert got_content == content, f"Expected object content is\n{content}\nGot\n{got_content}"
-
-        with reporter.step("Restore previous object version"):
-            s3_client.delete_object(bucket, obj_key, version_id=version_id_delete)
-
-            file_name = s3_client.get_object(bucket, obj_key)
-            got_content = get_file_content(file_name)
-            assert got_content == version_2_content, f"Expected object content is\n{version_2_content}\nGot\n{got_content}"
@allure.title("Enable and disable versioning without object_lock (s3_client={s3_client})") @allure.title("Enable and disable versioning without object_lock (s3_client={s3_client})")
def test_s3_version(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize): def test_s3_version(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize):
file_path = generate_file(simple_object_size.value) file_path = generate_file(simple_object_size.value)
@ -96,7 +32,9 @@ class TestS3GateVersioning:
actual_version = [version.get("VersionId") for version in object_version if version.get("Key") == file_name] actual_version = [version.get("VersionId") for version in object_version if version.get("Key") == file_name]
assert actual_version == ["null"], f"Expected version is null in list-object-versions, got {object_version}" assert actual_version == ["null"], f"Expected version is null in list-object-versions, got {object_version}"
object_0 = s3_client.head_object(bucket, file_name) object_0 = s3_client.head_object(bucket, file_name)
assert object_0.get("VersionId") == "null", f"Expected version is null in head-object, got {object_0.get('VersionId')}" assert (
object_0.get("VersionId") == "null"
), f"Expected version is null in head-object, got {object_0.get('VersionId')}"
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED) s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
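Background on the "null" assertions above: objects written before versioning is enabled occupy the reserved version id "null", and enabling versioning later does not rewrite them. A boto3 probe of that state (names and endpoint are placeholders):

import boto3

s3 = boto3.client("s3", endpoint_url="https://s3.frostfs.devenv:8080")  # assumed endpoint

# Pre-versioning objects keep the literal VersionId "null" even after
# versioning is switched on for the bucket
versions = s3.list_object_versions(Bucket="test-bucket").get("Versions", [])
null_versions = [v["Key"] for v in versions if v["VersionId"] == "null"]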

View file

@ -1,42 +1,77 @@
import logging
-from re import fullmatch
+import os
+from http import HTTPStatus
+from re import fullmatch, match

import allure
import pytest
+import requests
from frostfs_testlib import reporter
from frostfs_testlib.hosting import Hosting
+from frostfs_testlib.resources.common import ASSETS_DIR
+from frostfs_testlib.utils.env_utils import read_env_properties, save_env_properties
from frostfs_testlib.utils.version_utils import get_remote_binaries_versions
+from pytest import FixtureRequest

logger = logging.getLogger("NeoLogger")

-VERSION_REGEX = r"^([a-zA-Z0-9]*/)?\d+\.\d+\.\d+(-.*)?(?<!dirty)"
-VERSION_ERROR_MSG = "{name} [{host}]: Actual version doesn't conform to format '0.0.0-000-aaaaaaa': {version}"
-
-
-def _check_version_format(version):
-    return fullmatch(VERSION_REGEX, version)
-

@allure.title("Check binaries versions")
@pytest.mark.check_binaries
-def test_binaries_versions(hosting: Hosting):
+def test_binaries_versions(request: FixtureRequest, hosting: Hosting):
    """
    Compare binaries versions from external source (url) and deployed on servers.
    """
    with reporter.step("Get binaries versions from servers"):
-        versions_by_host = get_remote_binaries_versions(hosting)
+        got_versions, exсeptions_remote_binaries_versions = get_remote_binaries_versions(hosting)

+    environment_dir = request.config.getoption("--alluredir") or ASSETS_DIR
+    env_file = os.path.join(environment_dir, "environment.properties")
+    env_properties = read_env_properties(env_file)
+
+    # compare versions from servers and file
    exсeptions = []
+    additional_env_properties = {}

-    last_host, versions_on_last_host = versions_by_host.popitem()
-    for name, version in versions_on_last_host.items():
-        for host, versions_on_host in versions_by_host.items():
-            if versions_on_host[name] != version:
-                exсeptions.append(f"Binary of {name} has inconsistent version {versions_on_host[name]} on host {host}")
-            if not _check_version_format(versions_on_host[name]):
-                exсeptions.append(VERSION_ERROR_MSG.format(name=name, host=host, version=version))
-        if not _check_version_format(version):
-            exсeptions.append(VERSION_ERROR_MSG.format(name=name, host=last_host, version=version))
-
-    assert not exсeptions, "\n".join(exсeptions)
+    for binary_name, binary in got_versions.items():
+        version = binary["version"]
+        requires_check = binary["check"]
+        if requires_check and not fullmatch(r"^\d+\.\d+\.\d+(-.*)?(?<!dirty)", version):
+            exсeptions.append(f"{binary_name}: Actual version doesn't conform to format '0.0.0-000-aaaaaaa': {version}")
+
+        # If some binary was not listed in the env properties file, let's add it
+        # so that we have full information about versions in allure report
+        if env_properties and binary_name not in env_properties:
+            additional_env_properties[binary_name] = version
+
+    if env_properties and additional_env_properties:
+        save_env_properties(env_file, additional_env_properties)
+
+    exсeptions.extend(exсeptions_remote_binaries_versions)
+
+    # create clear beautiful error with aggregation info
+    if exсeptions:
+        msg = "\n".join(exсeptions)
+        raise AssertionError(f"Found binaries with unexpected versions:\n{msg}")
+
+
+@reporter.step("Download versions info from {url}")
+def download_versions_info(url: str) -> dict:
+    binaries_to_version = {}
+
+    response = requests.get(url)
+
+    assert response.status_code == HTTPStatus.OK, f"Got {response.status_code} code. Content {response.json()}"
+
+    content = response.text
+    assert content, f"Expected file with content, got {response}"
+
+    for line in content.split("\n"):
+        m = match("(.*)=(.*)", line)
+        if not m:
+            logger.warning(f"Could not get binary/version from {line}")
+            continue
+        bin_name, bin_version = m.group(1), m.group(2)
+        binaries_to_version[bin_name] = bin_version
+
+    return binaries_to_version
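The inline pattern keeps the old regex's trailing negative lookbehind, which is what rejects locally-built "-dirty" binaries while accepting release and dev versions. Quick self-check of the behavior (version strings are made up):

from re import fullmatch

VERSION_RE = r"^\d+\.\d+\.\d+(-.*)?(?<!dirty)"

assert fullmatch(VERSION_RE, "0.38.0")              # release build
assert fullmatch(VERSION_RE, "0.38.0-88-g6795a59")  # dev build (made-up hash)
assert not fullmatch(VERSION_RE, "0.38.0-dirty")    # locally modified build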

View file

@ -1,25 +1,28 @@
+from datetime import datetime
+
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.credentials.interfaces import CredentialsProvider, User
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
-from frostfs_testlib.utils import string_utils

@pytest.fixture(scope="module")
-def owner_wallet(default_wallet: WalletInfo) -> WalletInfo:
-    return default_wallet
+def owner_wallet(credentials_provider: CredentialsProvider, cluster: Cluster) -> WalletInfo:
+    with reporter.step("Create user wallet which owns containers and objects"):
+        user = User(f"user_{hex(int(datetime.now().timestamp() * 1000000))}")
+        return credentials_provider.GRPC.provide(user, cluster.cluster_nodes[0])

@pytest.fixture(scope="module")
def user_wallet(credentials_provider: CredentialsProvider, cluster: Cluster) -> WalletInfo:
    with reporter.step("Create user wallet which will use objects from owner via static session"):
-        user = User(string_utils.unique_name("user-"))
+        user = User(f"user_{hex(int(datetime.now().timestamp() * 1000000))}")
        return credentials_provider.GRPC.provide(user, cluster.cluster_nodes[0])

@pytest.fixture(scope="module")
def stranger_wallet(credentials_provider: CredentialsProvider, cluster: Cluster) -> WalletInfo:
    with reporter.step("Create stranger user wallet which should fail to obtain data"):
-        user = User(string_utils.unique_name("user-"))
+        user = User(f"user_{hex(int(datetime.now().timestamp() * 1000000))}")
        return credentials_provider.GRPC.provide(user, cluster.cluster_nodes[0])

View file

@ -10,10 +10,10 @@ from frostfs_testlib.steps.session_token import create_session_token
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
-from frostfs_testlib.utils import wallet_utils
from frostfs_testlib.utils.file_utils import generate_file

-@pytest.mark.nightly
@pytest.mark.sanity
@pytest.mark.session_token
class TestDynamicObjectSession(ClusterTestBase):

View file

@ -3,7 +3,12 @@ import logging
import allure
import pytest
from frostfs_testlib import reporter
-from frostfs_testlib.resources.error_patterns import EXPIRED_SESSION_TOKEN, MALFORMED_REQUEST, OBJECT_ACCESS_DENIED, OBJECT_NOT_FOUND
+from frostfs_testlib.resources.error_patterns import (
+    EXPIRED_SESSION_TOKEN,
+    MALFORMED_REQUEST,
+    OBJECT_ACCESS_DENIED,
+    OBJECT_NOT_FOUND,
+)
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import (

@ -133,7 +138,6 @@ def static_sessions(

    }

-@pytest.mark.nightly
@pytest.mark.static_session
class TestObjectStaticSession(ClusterTestBase):
    @allure.title("Read operations with static session (method={method_under_test.__name__}, obj_size={object_size})")

View file

@ -1,14 +1,19 @@
-import allure
import pytest
from frostfs_testlib import reporter
+from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.shell import Shell
+from frostfs_testlib.steps.acl import create_eacl, set_eacl, wait_for_cache_expired
from frostfs_testlib.steps.cli.container import create_container, delete_container, get_container, list_containers
from frostfs_testlib.steps.session_token import ContainerVerb, get_container_signed_token
+from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
+from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
+from frostfs_testlib.utils.file_utils import generate_file
+
+from pytest_tests.helpers.object_access import can_put_object

-@pytest.mark.nightly
@pytest.mark.static_session_container
class TestSessionTokenContainer(ClusterTestBase):
    @pytest.fixture(scope="module")

@ -22,9 +27,11 @@ class TestSessionTokenContainer(ClusterTestBase):

        """
        Returns dict with static session token file paths for all verbs with default lifetime
        """
-        return {verb: get_container_signed_token(owner_wallet, user_wallet, verb, client_shell, temp_directory) for verb in ContainerVerb}
+        return {
+            verb: get_container_signed_token(owner_wallet, user_wallet, verb, client_shell, temp_directory)
+            for verb in ContainerVerb
+        }
@allure.title("Static session with create operation")
def test_static_session_token_container_create( def test_static_session_token_container_create(
self, self,
owner_wallet: WalletInfo, owner_wallet: WalletInfo,
@ -43,13 +50,14 @@ class TestSessionTokenContainer(ClusterTestBase):
wait_for_creation=False, wait_for_creation=False,
) )
container_info: dict[str, str] = get_container(owner_wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint) container_info: dict[str, str] = get_container(
owner_wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
assert container_info["ownerID"] == owner_wallet.get_address() assert container_info["ownerID"] == owner_wallet.get_address()
assert cid not in list_containers(user_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint) assert cid not in list_containers(user_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
assert cid in list_containers(owner_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint) assert cid in list_containers(owner_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
@allure.title("[NEGATIVE] Static session without create operation")
def test_static_session_token_container_create_with_other_verb( def test_static_session_token_container_create_with_other_verb(
self, self,
user_wallet: WalletInfo, user_wallet: WalletInfo,
@ -69,7 +77,6 @@ class TestSessionTokenContainer(ClusterTestBase):
wait_for_creation=False, wait_for_creation=False,
) )
@allure.title("[NEGATIVE] Static session with create operation for other wallet")
def test_static_session_token_container_create_with_other_wallet( def test_static_session_token_container_create_with_other_wallet(
self, self,
stranger_wallet: WalletInfo, stranger_wallet: WalletInfo,
@ -88,7 +95,6 @@ class TestSessionTokenContainer(ClusterTestBase):
wait_for_creation=False, wait_for_creation=False,
) )
@allure.title("Static session with delete operation")
def test_static_session_token_container_delete( def test_static_session_token_container_delete(
self, self,
owner_wallet: WalletInfo, owner_wallet: WalletInfo,
@ -116,3 +122,39 @@ class TestSessionTokenContainer(ClusterTestBase):
) )
assert cid not in list_containers(owner_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint) assert cid not in list_containers(owner_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
+    @pytest.mark.sanity
+    def test_static_session_token_container_set_eacl(
+        self,
+        owner_wallet: WalletInfo,
+        user_wallet: WalletInfo,
+        stranger_wallet: WalletInfo,
+        static_sessions: dict[ContainerVerb, str],
+        simple_object_size: ObjectSize,
+    ):
+        """
+        Validate static session with set eacl operation
+        """
+        with reporter.step("Create container"):
+            cid = create_container(
+                owner_wallet,
+                basic_acl=PUBLIC_ACL,
+                shell=self.shell,
+                endpoint=self.cluster.default_rpc_endpoint,
+            )
+        file_path = generate_file(simple_object_size.value)
+        assert can_put_object(stranger_wallet, cid, file_path, self.shell, self.cluster)
+
+        with reporter.step("Deny all operations for other via eACL"):
+            eacl_deny = [EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=op) for op in EACLOperation]
+            set_eacl(
+                user_wallet,
+                cid,
+                create_eacl(cid, eacl_deny, shell=self.shell),
+                shell=self.shell,
+                endpoint=self.cluster.default_rpc_endpoint,
+                session_token=static_sessions[ContainerVerb.SETEACL],
+            )
+            wait_for_cache_expired()
+
+        assert not can_put_object(stranger_wallet, cid, file_path, self.shell, self.cluster)

View file

@ -18,7 +18,6 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file from frostfs_testlib.utils.file_utils import generate_file
@pytest.mark.nightly
@pytest.mark.shard @pytest.mark.shard
class TestControlShard(ClusterTestBase): class TestControlShard(ClusterTestBase):
@staticmethod @staticmethod
@ -32,7 +31,9 @@ class TestControlShard(ClusterTestBase):
data_path = node.storage_node.get_data_directory() data_path = node.storage_node.get_data_directory()
all_datas = node_shell.exec(f"ls -la {data_path}/data | awk '{{ print $9 }}'").stdout.strip() all_datas = node_shell.exec(f"ls -la {data_path}/data | awk '{{ print $9 }}'").stdout.strip()
for data_dir in all_datas.replace(".", "").strip().split("\n"): for data_dir in all_datas.replace(".", "").strip().split("\n"):
check_dir = node_shell.exec(f" [ -d {data_path}/data/{data_dir}/data/{oid_path} ] && echo 1 || echo 0").stdout check_dir = node_shell.exec(
f" [ -d {data_path}/data/{data_dir}/data/{oid_path} ] && echo 1 || echo 0"
).stdout
if "1" in check_dir: if "1" in check_dir:
object_path = f"{data_path}/data/{data_dir}/data/{oid_path}" object_path = f"{data_path}/data/{data_dir}/data/{oid_path}"
object_name = f"{oid[4:]}.{cid}" object_name = f"{oid[4:]}.{cid}"
@@ -65,7 +66,9 @@ class TestControlShard(ClusterTestBase):
basic_acl=EACL_PUBLIC_READ_WRITE,
)
file = generate_file(round(max_object_size * 0.8))
oid = put_object(
wallet=default_wallet, path=file, cid=cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
with reporter.step("Search node with object"):
nodes = get_object_nodes(cluster=self.cluster, cid=cid, oid=oid, alive_node=self.cluster.cluster_nodes[0])
@@ -73,7 +76,9 @@ class TestControlShard(ClusterTestBase):
object_path, object_name = self.get_object_path_and_name_file(oid, cid, nodes[0])
nodes[0].host.get_shell().exec(f"chmod +r {object_path}/{object_name}")
delete_object(
wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
delete_container(wallet=default_wallet, cid=cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
@staticmethod
@@ -112,7 +117,6 @@ class TestControlShard(ClusterTestBase):
assert set(shards_from_config) == set(shards_from_cli)
@allure.title("Shard become read-only when errors exceeds threshold")
@pytest.mark.failover
def test_shard_errors(
self,
@@ -140,5 +144,5 @@ class TestControlShard(ClusterTestBase):
for shard in ShardsWatcher(node).get_shards():
if shard["blobstor"][1]["path"] in object_path:
with reporter.step(f"Shard - {shard['shard_id']} to {node.host_ip}, mode - {shard['mode']}"):
assert shard["mode"] == "read-only" assert shard["mode"] == "degraded-read-only"
break
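The expected mode in this assertion changed from read-only to degraded-read-only. Since a shard flips mode asynchronously once its error count crosses the threshold, a polling variant of the same check can make the test less racy; a hypothetical sketch reusing the ShardsWatcher call from the loop above:

import time

def wait_for_shard_mode(node, object_path, expected="degraded-read-only", attempts=12, delay=5):
    # Poll the node's shards until the one backing object_path reports the expected mode.
    # Assumes get_shards() returns fresh state on each call, as the loop above implies.
    for _ in range(attempts):
        for shard in ShardsWatcher(node).get_shards():
            if shard["blobstor"][1]["path"] in object_path and shard["mode"] == expected:
                return shard
        time.sleep(delay)
    raise AssertionError(f"shard for {object_path} never reached mode '{expected}'")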

View file

@@ -1,133 +0,0 @@
import os
import shutil
import time
from datetime import datetime, timezone
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.hosting import Host
from frostfs_testlib.testing.cluster_test_base import Cluster
from frostfs_testlib.testing.parallel import parallel
def pytest_generate_tests(metafunc: pytest.Metafunc):
metafunc.fixturenames.append("repo")
metafunc.fixturenames.append("markers")
metafunc.parametrize(
"repo, markers",
[("frostfs-testcases", metafunc.config.option.markexpr)],
)
@pytest.mark.session_logs
class TestLogs:
@pytest.mark.logs_after_session
@pytest.mark.order(1000)
@allure.title("Check logs from frostfs-testcases with marks '{request.config.option.markexpr}' - search errors")
def test_logs_search_errors(self, temp_directory: str, cluster: Cluster, session_start_time: datetime, request: pytest.FixtureRequest):
end_time = datetime.now(timezone.utc)
logs_dir = os.path.join(temp_directory, "logs")
if not os.path.exists(logs_dir):
os.makedirs(logs_dir)
regexes = [
r"\bpanic\b",
r"\boom\b",
r"too many",
r"insufficient funds",
r"insufficient amount of gas",
r"cannot assign requested address",
r"\bunable to process\b",
r"\bmaximum number of subscriptions is reached\b",
]
issues_regex = "|".join(regexes)
exclude_filter = r"too many requests"
log_level_priority = "3" # will include 0-3 priority logs (0: emergency 1: alerts 2: critical 3: errors)
time.sleep(2)
futures = parallel(
self._collect_logs_on_host,
cluster.hosts,
logs_dir,
issues_regex,
session_start_time,
end_time,
exclude_filter,
priority=log_level_priority,
)
hosts_with_problems = [future.result() for future in futures if not future.exception() and future.result() is not None]
if hosts_with_problems:
self._attach_logs(logs_dir)
assert not hosts_with_problems, f"The following hosts contain critical errors in system logs: {', '.join(hosts_with_problems)}"
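The filtering contract here is: issues_regex selects suspicious lines, exclude_filter drops known noise, and priority="3" restricts collection to syslog severities 0-3 (emergency through error). The actual matching happens host-side inside host.get_filtered_logs; a local, hedged equivalent of the select/exclude logic, assuming case-sensitive matching:

import re

issue_pattern = re.compile(issues_regex)      # "|".join(regexes) from above
exclude_pattern = re.compile(exclude_filter)  # r"too many requests"

def is_problem_line(line: str) -> bool:
    # A line counts as a problem when it matches an issue pattern
    # and is not covered by the exclusion filter.
    return bool(issue_pattern.search(line)) and not exclude_pattern.search(line)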
@pytest.mark.order(1001)
@allure.title("Check logs from frostfs-testcases with marks '{request.config.option.markexpr}' - identify sensitive data")
def test_logs_identify_sensitive_data(
self, temp_directory: str, cluster: Cluster, session_start_time: datetime, request: pytest.FixtureRequest
):
end_time = datetime.now(timezone.utc)
logs_dir = os.path.join(temp_directory, "logs")
if not os.path.exists(logs_dir):
os.makedirs(logs_dir)
_regex = {
"authorization_basic": r"basic [a-zA-Z0-9=:_\+\/-]{16,100}",
"authorization_bearer": r"bearer [a-zA-Z0-9_\-\.=:_\+\/]{16,100}",
"access_token": r"\"access_token\":\"[0-9a-z]{16}\$[0-9a-f]{32}\"",
"api_token": r"\"api_token\":\"(xox[a-zA-Z]-[a-zA-Z0-9-]+)\"",
"yadro_access_token": r"[a-zA-Z0-9_-]*:[a-zA-Z0-9_\-]+@yadro\.com*",
"SSH_privKey": r"([-]+BEGIN [^\s]+ PRIVATE KEY[-]+[\s]*[^-]*[-]+END [^\s]+ PRIVATE KEY[-]+)",
"possible_Creds": r"(?i)(" r"password\s*[`=:]+\s*[^\s]+|" r"password is\s*[`=:]+\s*[^\s]+|" r"passwd\s*[`=:]+\s*[^\s]+)",
}
issues_regex = "|".join(_regex.values())
exclude_filter = r"COMMAND=\|--\sBoot\s"
time.sleep(2)
futures = parallel(
self._collect_logs_on_host,
cluster.hosts,
logs_dir,
issues_regex,
session_start_time,
end_time,
exclude_filter,
)
hosts_with_problems = [future.result() for future in futures if not future.exception() and future.result() is not None]
if hosts_with_problems:
self._attach_logs(logs_dir)
assert not hosts_with_problems, f"The following hosts contain sensitive data in system logs: {', '.join(hosts_with_problems)}"
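Joining _regex.values() with "|" yields a single scanner that fires on any of the credential shapes above; note that the possible_Creds branch carries an inline (?i) flag, and compiling the joined pattern with Python's re would reject that mid-pattern global flag on recent Pythons, so this self-check probes one branch in isolation. The secret below is a placeholder (hypothetical sample, not real data):

import re

creds = re.compile(_regex["possible_Creds"])
# "hunter2" is a stand-in; any non-whitespace token after "passwd =" matches.
assert creds.search("passwd = hunter2")
assert not creds.search("ordinary log line without secrets")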
def _collect_logs_on_host(
self,
host: Host,
logs_dir: str,
regex: str,
since: datetime,
until: datetime,
exclude_filter: str,
priority: str = None,
):
with reporter.step(f"Get logs from {host.config.address}"):
logs = host.get_filtered_logs(filter_regex=regex, since=since, until=until, exclude_filter=exclude_filter, priority=priority)
if not logs:
return None
with open(os.path.join(logs_dir, f"{host.config.address}.log"), "w") as file:
file.write(logs)
return host.config.address
def _attach_logs(self, logs_dir: str) -> None:
# Zip all files and attach to Allure because it is more convenient to download a single
# zip with all logs rather than mess with individual log files per service or node
logs_zip_file_path = shutil.make_archive(logs_dir, "zip", logs_dir)
reporter.attach(logs_zip_file_path, "logs.zip")

View file

@@ -0,0 +1,82 @@
import os
import shutil
import time
from datetime import datetime
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.hosting import Host
from frostfs_testlib.testing.cluster_test_base import Cluster
from frostfs_testlib.testing.parallel import parallel
def pytest_generate_tests(metafunc: pytest.Metafunc):
metafunc.fixturenames.append("repo")
metafunc.fixturenames.append("markers")
metafunc.parametrize(
"repo, markers",
[("frostfs-testcases", metafunc.config.option.markexpr)],
)
class TestLogs:
@allure.title("Check logs from frostfs-testcases with marks '{request.config.option.markexpr}'")
@pytest.mark.logs_after_session
@pytest.mark.no_healthcheck
def test_logs_after_session(
self, temp_directory: str, cluster: Cluster, session_start_time: datetime, request: pytest.FixtureRequest
):
"""
This test is automatically added to every test run to check the cluster logs for critical errors.
"""
end_time = datetime.utcnow()
logs_dir = os.path.join(temp_directory, "logs")
os.makedirs(logs_dir)
# Using \b here because 'oom' and 'panic' can sometimes be found in OID or CID
issues_regex = r"\bpanic\b|\boom\b|too many|insufficient funds|insufficient amount of gas|wallet passwd|secret \bkey\b|access \bkey\b|cannot assign requested address"
exclude_filter = r"too many requests"
time.sleep(2)
futures = parallel(
self._collect_logs_on_host,
cluster.hosts,
logs_dir,
issues_regex,
session_start_time,
end_time,
exclude_filter,
)
hosts_with_problems = [
future.result() for future in futures if not future.exception() and future.result() is not None
]
if hosts_with_problems:
self._attach_logs(logs_dir)
assert (
not hosts_with_problems
), f"The following hosts contain critical errors in system logs: {', '.join(hosts_with_problems)}"
def _collect_logs_on_host(
self, host: Host, logs_dir: str, regex: str, since: datetime, until: datetime, exclude_filter: str
):
with reporter.step(f"Get logs from {host.config.address}"):
logs = host.get_filtered_logs(filter_regex=regex, since=since, until=until, exclude_filter=exclude_filter)
if not logs:
return None
with open(os.path.join(logs_dir, f"{host.config.address}.log"), "w") as file:
file.write(logs)
return host.config.address
def _attach_logs(self, logs_dir: str) -> None:
# Zip all files and attach to Allure because it is more convenient to download a single
# zip with all logs rather than mess with individual log files per service or node
logs_zip_file_path = shutil.make_archive(logs_dir, "zip", logs_dir)
reporter.attach(logs_zip_file_path, "logs.zip")
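The word boundaries called out in the comment inside test_logs_after_session are what keep short tokens such as oom and panic from matching inside base58 object and container IDs; a quick demonstration with hypothetical sample strings:

import re

pattern = re.compile(r"\bpanic\b|\boom\b")

# "oom" embedded in a base58-looking ID has word characters on both sides: no match.
assert not pattern.search("oid 7Goom3kXq9TzB")
# A genuine panic line is delimited by a space and a colon: match.
assert pattern.search("storage node panic: runtime error")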

View file

@@ -1,9 +1,10 @@
allure-pytest==2.13.2
allure-python-commons==2.13.2
base58==2.1.0
boto3==1.16.33
botocore==1.19.33
configobj==5.0.6
frostfs-testlib>=2.0.1
neo-mamba==1.0.0
pexpect==4.8.0
pyyaml==6.0.1