forked from TrueCloudLab/frostfs-testcases
Compare commits
31 commits
Author | SHA1 | Date | |
---|---|---|---|
453286d459 | |||
b999d7cf9b | |||
7a1d2c9e2d | |||
bb796a61d3 | |||
abd810cef6 | |||
d8a3f51787 | |||
91cd71de19 | |||
20c2948818 | |||
6f254af8b1 | |||
1ee5f73243 | |||
aef9d43979 | |||
0e72717dcd | |||
b33514df3c | |||
09acd6f283 | |||
b7669fc96f | |||
75508cc70c | |||
6b83a89b94 | |||
77126f2706 | |||
64bc778116 | |||
6442a52abd | |||
8dcb3ccf3c | |||
44ed00f9bc | |||
d10e5975e7 | |||
f6576d4f6f | |||
1afadfa363 | |||
7d0fa79fb2 | |||
64c70948f9 | |||
8234a0ece2 | |||
9528ff0333 | |||
ffdfff6ba0 | |||
ccdd6ab784 |
76 changed files with 3875 additions and 3149 deletions
26
.forgejo/workflows/dco.yml
Normal file
26
.forgejo/workflows/dco.yml
Normal file
|
@ -0,0 +1,26 @@
|
||||||
|
# yamllint disable rule:truthy
|
||||||
|
|
||||||
|
name: DCO check
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
dco:
|
||||||
|
name: DCO
|
||||||
|
runs-on: docker
|
||||||
|
container:
|
||||||
|
image: node:22-bookworm
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Setup Go
|
||||||
|
uses: actions/setup-go@v3
|
||||||
|
with:
|
||||||
|
go-version: '1.22'
|
||||||
|
|
||||||
|
- name: Run commit format checker
|
||||||
|
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
|
||||||
|
with:
|
||||||
|
from: 'origin/${{ github.event.pull_request.base.ref }}'
|
22
.github/workflows/dco.yml
vendored
22
.github/workflows/dco.yml
vendored
|
@ -1,22 +0,0 @@
|
||||||
name: DCO check
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
- develop
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
commits_check_job:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
name: Commits Check
|
|
||||||
steps:
|
|
||||||
- name: Get PR Commits
|
|
||||||
id: 'get-pr-commits'
|
|
||||||
uses: tim-actions/get-pr-commits@master
|
|
||||||
with:
|
|
||||||
token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
- name: DCO Check
|
|
||||||
uses: tim-actions/dco@master
|
|
||||||
with:
|
|
||||||
commits: ${{ steps.get-pr-commits.outputs.commits }}
|
|
|
@ -10,10 +10,14 @@ repos:
|
||||||
- id: isort
|
- id: isort
|
||||||
name: isort (python)
|
name: isort (python)
|
||||||
- repo: https://git.frostfs.info/TrueCloudLab/allure-validator
|
- repo: https://git.frostfs.info/TrueCloudLab/allure-validator
|
||||||
rev: 1.0.1
|
rev: 1.1.1
|
||||||
hooks:
|
hooks:
|
||||||
- id: allure-validator
|
- id: allure-validator
|
||||||
args: ["pytest_tests/"]
|
args: [
|
||||||
|
"pytest_tests/",
|
||||||
|
"--plugins",
|
||||||
|
"frostfs[-_]testlib*",
|
||||||
|
]
|
||||||
pass_filenames: false
|
pass_filenames: false
|
||||||
|
|
||||||
ci:
|
ci:
|
||||||
|
|
0
__init__.py
Normal file
0
__init__.py
Normal file
13
pytest.ini
13
pytest.ini
|
@ -13,6 +13,8 @@ markers =
|
||||||
# controlling markers
|
# controlling markers
|
||||||
order: manual control of test order
|
order: manual control of test order
|
||||||
logs_after_session: Make the last test in session
|
logs_after_session: Make the last test in session
|
||||||
|
# parametrizing markers
|
||||||
|
container: specify container details for container creation
|
||||||
# functional markers
|
# functional markers
|
||||||
maintenance: tests for change mode node
|
maintenance: tests for change mode node
|
||||||
container: tests for container creation
|
container: tests for container creation
|
||||||
|
@ -35,11 +37,12 @@ markers =
|
||||||
session_token: tests for operations with session token
|
session_token: tests for operations with session token
|
||||||
static_session: tests for operations with static session token
|
static_session: tests for operations with static session token
|
||||||
bearer: tests for bearer tokens
|
bearer: tests for bearer tokens
|
||||||
acl: All tests for ACL
|
ape: tests for APE
|
||||||
acl_basic: tests for basic ACL
|
ape_allow: tests for APE allow rules
|
||||||
acl_bearer: tests for ACL with bearer
|
ape_deny: tests for APE deny rules
|
||||||
acl_extended: tests for extended ACL
|
ape_container: tests for APE on container operations
|
||||||
acl_filters: tests for extended ACL with filters and headers
|
ape_object: tests for APE on object operations
|
||||||
|
ape_namespace: tests for APE on namespace scope
|
||||||
storage_group: tests for storage groups
|
storage_group: tests for storage groups
|
||||||
failover: tests for system recovery after a failure
|
failover: tests for system recovery after a failure
|
||||||
failover_panic: tests for system recovery after panic reboot of a node
|
failover_panic: tests for system recovery after panic reboot of a node
|
||||||
|
|
|
@ -0,0 +1,3 @@
|
||||||
|
import os
|
||||||
|
|
||||||
|
TESTS_BASE_PATH = os.path.dirname(os.path.relpath(__file__))
|
0
pytest_tests/helpers/__init__.py
Normal file
0
pytest_tests/helpers/__init__.py
Normal file
|
@ -6,7 +6,7 @@ from frostfs_testlib.storage.cluster import Cluster
|
||||||
from frostfs_testlib.storage.dataclasses import ape
|
from frostfs_testlib.storage.dataclasses import ape
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
|
|
||||||
from pytest_tests.helpers.object_access import (
|
from ..helpers.object_access import (
|
||||||
can_delete_object,
|
can_delete_object,
|
||||||
can_get_head_object,
|
can_get_head_object,
|
||||||
can_get_object,
|
can_get_object,
|
||||||
|
|
79
pytest_tests/helpers/container_creation.py
Normal file
79
pytest_tests/helpers/container_creation.py
Normal file
|
@ -0,0 +1,79 @@
|
||||||
|
import json
|
||||||
|
import time
|
||||||
|
|
||||||
|
from frostfs_testlib import reporter
|
||||||
|
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
|
||||||
|
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
|
||||||
|
from frostfs_testlib.shell.interfaces import Shell
|
||||||
|
from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container
|
||||||
|
from frostfs_testlib.storage.cluster import Cluster
|
||||||
|
from frostfs_testlib.storage.dataclasses import ape
|
||||||
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
|
from frostfs_testlib.testing.parallel import parallel
|
||||||
|
from frostfs_testlib.utils import datetime_utils
|
||||||
|
|
||||||
|
from .container_request import ContainerRequest, MultipleContainersRequest
|
||||||
|
|
||||||
|
|
||||||
|
def create_container_with_ape(
|
||||||
|
container_request: ContainerRequest,
|
||||||
|
frostfs_cli: FrostfsCli,
|
||||||
|
wallet: WalletInfo,
|
||||||
|
shell: Shell,
|
||||||
|
cluster: Cluster,
|
||||||
|
endpoint: str,
|
||||||
|
) -> str:
|
||||||
|
with reporter.step("Create container"):
|
||||||
|
cid = _create_container_by_spec(container_request, wallet, shell, cluster, endpoint)
|
||||||
|
|
||||||
|
if container_request.ape_rules:
|
||||||
|
with reporter.step("Apply APE rules for container"):
|
||||||
|
_apply_ape_rules(cid, frostfs_cli, endpoint, container_request.ape_rules)
|
||||||
|
|
||||||
|
with reporter.step("Wait for one block"):
|
||||||
|
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
|
||||||
|
|
||||||
|
with reporter.step("Search nodes holding the container"):
|
||||||
|
container_holder_nodes = search_nodes_with_container(wallet, cid, shell, cluster.default_rpc_endpoint, cluster)
|
||||||
|
report_data = {node.id: node.host_ip for node in container_holder_nodes}
|
||||||
|
|
||||||
|
reporter.attach(json.dumps(report_data, indent=2), "container_nodes.json")
|
||||||
|
|
||||||
|
return cid
|
||||||
|
|
||||||
|
|
||||||
|
@reporter.step("Create multiple containers with APE")
|
||||||
|
def create_containers_with_ape(
|
||||||
|
frostfs_cli: FrostfsCli,
|
||||||
|
wallet: WalletInfo,
|
||||||
|
shell: Shell,
|
||||||
|
cluster: Cluster,
|
||||||
|
endpoint: str,
|
||||||
|
multiple_containers_request: MultipleContainersRequest,
|
||||||
|
) -> list[str]:
|
||||||
|
cids_futures = parallel(create_container_with_ape, multiple_containers_request, frostfs_cli, wallet, shell, cluster, endpoint)
|
||||||
|
return [future.result() for future in cids_futures]
|
||||||
|
|
||||||
|
|
||||||
|
@reporter.step("Create container by spec {container_request}")
|
||||||
|
def _create_container_by_spec(
|
||||||
|
container_request: ContainerRequest,
|
||||||
|
wallet: WalletInfo,
|
||||||
|
shell: Shell,
|
||||||
|
cluster: Cluster,
|
||||||
|
endpoint: str,
|
||||||
|
) -> str:
|
||||||
|
return create_container(wallet, shell, endpoint, container_request.parsed_rule(cluster), wait_for_creation=False)
|
||||||
|
|
||||||
|
|
||||||
|
def _apply_ape_rules(cid: str, frostfs_cli: FrostfsCli, endpoint: str, ape_rules: list[ape.Rule]):
|
||||||
|
for ape_rule in ape_rules:
|
||||||
|
rule_str = ape_rule.as_string()
|
||||||
|
with reporter.step(f"Apply APE rule '{rule_str}' for container {cid}"):
|
||||||
|
frostfs_cli.ape_manager.add(
|
||||||
|
endpoint,
|
||||||
|
ape_rule.chain_id,
|
||||||
|
target_name=cid,
|
||||||
|
target_type="container",
|
||||||
|
rule=rule_str,
|
||||||
|
)
|
104
pytest_tests/helpers/container_request.py
Normal file
104
pytest_tests/helpers/container_request.py
Normal file
|
@ -0,0 +1,104 @@
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from functools import partial
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
from frostfs_testlib.steps.cli.container import DEFAULT_PLACEMENT_RULE
|
||||||
|
from frostfs_testlib.storage.cluster import Cluster
|
||||||
|
from frostfs_testlib.storage.dataclasses import ape
|
||||||
|
|
||||||
|
APE_EVERYONE_ALLOW_ALL = [ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL)]
|
||||||
|
# In case if we need container operations
|
||||||
|
# ape.Rule(ape.Verb.ALLOW, ape.ContainerOperations.WILDCARD_ALL)]
|
||||||
|
APE_OWNER_ALLOW_ALL = [ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL, ape.Condition.by_role(ape.Role.OWNER))]
|
||||||
|
# In case if we need container operations
|
||||||
|
# ape.Rule(ape.Verb.ALLOW, ape.ContainerOperations.WILDCARD_ALL, ape.Condition.by_role(ape.Role.OWNER))]
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ContainerRequest:
|
||||||
|
policy: str = None
|
||||||
|
ape_rules: list[ape.Rule] = None
|
||||||
|
|
||||||
|
short_name: str | None = None
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
if self.ape_rules is None:
|
||||||
|
self.ape_rules = []
|
||||||
|
|
||||||
|
# For pytest instead of ids=[...] everywhere
|
||||||
|
self.__name__ = self.short_name
|
||||||
|
|
||||||
|
def parsed_rule(self, cluster: Cluster):
|
||||||
|
if self.policy is None:
|
||||||
|
return None
|
||||||
|
|
||||||
|
substitutions = {"%NODE_COUNT%": str(len(cluster.cluster_nodes))}
|
||||||
|
|
||||||
|
parsed_rule = self.policy
|
||||||
|
for sub, replacement in substitutions.items():
|
||||||
|
parsed_rule = parsed_rule.replace(sub, replacement)
|
||||||
|
|
||||||
|
return parsed_rule
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
if self.short_name:
|
||||||
|
return self.short_name
|
||||||
|
|
||||||
|
spec_info: list[str] = []
|
||||||
|
|
||||||
|
if self.policy:
|
||||||
|
spec_info.append(f"policy='{self.policy}'")
|
||||||
|
if self.ape_rules:
|
||||||
|
ape_rules_list = ", ".join([f"'{rule.as_string()}'" for rule in self.ape_rules])
|
||||||
|
spec_info.append(f"ape_rules=[{ape_rules_list}]")
|
||||||
|
|
||||||
|
return f"({', '.join(spec_info)})"
|
||||||
|
|
||||||
|
|
||||||
|
class MultipleContainersRequest(list[ContainerRequest]):
|
||||||
|
def __init__(self, iterable=None):
|
||||||
|
"""Override initializer which can accept iterable"""
|
||||||
|
super(MultipleContainersRequest, self).__init__()
|
||||||
|
if iterable:
|
||||||
|
self.extend(iterable)
|
||||||
|
self.__set_name()
|
||||||
|
|
||||||
|
def __set_name(self):
|
||||||
|
self.__name__ = ", ".join([s.__name__ for s in self])
|
||||||
|
|
||||||
|
|
||||||
|
PUBLIC_WITH_POLICY = partial(ContainerRequest, ape_rules=APE_EVERYONE_ALLOW_ALL, short_name="Custom_policy_with_allow_all_ape_rule")
|
||||||
|
|
||||||
|
# REPS
|
||||||
|
REP_1_1_1 = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
|
||||||
|
|
||||||
|
REP_2_1_2 = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
|
||||||
|
REP_2_1_4 = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
|
||||||
|
|
||||||
|
REP_2_2_2 = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
|
||||||
|
REP_2_2_4 = "REP 2 IN X CBF 2 SELECT 4 FROM * AS X"
|
||||||
|
#
|
||||||
|
|
||||||
|
# Public means it has APE rule which allows everything for everyone
|
||||||
|
REP_1_1_1_PUBLIC = PUBLIC_WITH_POLICY(REP_1_1_1, short_name="REP 1 CBF 1 SELECT 1 (public)")
|
||||||
|
|
||||||
|
REP_2_1_2_PUBLIC = PUBLIC_WITH_POLICY(REP_2_1_2, short_name="REP 2 CBF 1 SELECT 2 (public)")
|
||||||
|
REP_2_1_4_PUBLIC = PUBLIC_WITH_POLICY(REP_2_1_4, short_name="REP 2 CBF 1 SELECT 4 (public)")
|
||||||
|
|
||||||
|
REP_2_2_2_PUBLIC = PUBLIC_WITH_POLICY(REP_2_2_2, short_name="REP 2 CBF 2 SELECT 2 (public)")
|
||||||
|
REP_2_2_4_PUBLIC = PUBLIC_WITH_POLICY(REP_2_2_4, short_name="REP 2 CBF 2 SELECT 4 (public)")
|
||||||
|
#
|
||||||
|
|
||||||
|
EVERYONE_ALLOW_ALL = ContainerRequest(policy=DEFAULT_PLACEMENT_RULE, ape_rules=APE_EVERYONE_ALLOW_ALL, short_name="Everyone_Allow_All")
|
||||||
|
OWNER_ALLOW_ALL = ContainerRequest(policy=DEFAULT_PLACEMENT_RULE, ape_rules=APE_OWNER_ALLOW_ALL, short_name="Owner_Allow_All")
|
||||||
|
PRIVATE = ContainerRequest(policy=DEFAULT_PLACEMENT_RULE, ape_rules=[], short_name="Private_No_APE")
|
||||||
|
|
||||||
|
|
||||||
|
def requires_container(container_request: None | ContainerRequest | list[ContainerRequest] = None) -> pytest.MarkDecorator:
|
||||||
|
if container_request is None:
|
||||||
|
container_request = EVERYONE_ALLOW_ALL
|
||||||
|
|
||||||
|
if not isinstance(container_request, list):
|
||||||
|
container_request = [container_request]
|
||||||
|
|
||||||
|
return pytest.mark.parametrize("container_request", container_request, indirect=True)
|
|
@ -2,7 +2,7 @@ from typing import Optional
|
||||||
|
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
|
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
|
||||||
from frostfs_testlib.resources.error_patterns import OBJECT_ACCESS_DENIED, OBJECT_NOT_FOUND
|
from frostfs_testlib.resources.error_patterns import OBJECT_ACCESS_DENIED
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.steps.cli.object import (
|
from frostfs_testlib.steps.cli.object import (
|
||||||
delete_object,
|
delete_object,
|
||||||
|
@ -20,10 +20,6 @@ from frostfs_testlib.utils.file_utils import get_file_hash
|
||||||
|
|
||||||
OPERATION_ERROR_TYPE = RuntimeError
|
OPERATION_ERROR_TYPE = RuntimeError
|
||||||
|
|
||||||
# TODO: Revert to just OBJECT_ACCESS_DENIED when the issue is fixed
|
|
||||||
# https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/1297
|
|
||||||
OBJECT_NO_ACCESS = rf"(?:{OBJECT_NOT_FOUND}|{OBJECT_ACCESS_DENIED})"
|
|
||||||
|
|
||||||
|
|
||||||
def can_get_object(
|
def can_get_object(
|
||||||
wallet: WalletInfo,
|
wallet: WalletInfo,
|
||||||
|
@ -47,7 +43,7 @@ def can_get_object(
|
||||||
cluster=cluster,
|
cluster=cluster,
|
||||||
)
|
)
|
||||||
except OPERATION_ERROR_TYPE as err:
|
except OPERATION_ERROR_TYPE as err:
|
||||||
assert string_utils.is_str_match_pattern(err, OBJECT_NO_ACCESS), f"Expected {err} to match {OBJECT_NO_ACCESS}"
|
assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
|
||||||
return False
|
return False
|
||||||
assert get_file_hash(file_name) == get_file_hash(got_file_path)
|
assert get_file_hash(file_name) == get_file_hash(got_file_path)
|
||||||
return True
|
return True
|
||||||
|
@ -102,7 +98,7 @@ def can_delete_object(
|
||||||
endpoint=endpoint,
|
endpoint=endpoint,
|
||||||
)
|
)
|
||||||
except OPERATION_ERROR_TYPE as err:
|
except OPERATION_ERROR_TYPE as err:
|
||||||
assert string_utils.is_str_match_pattern(err, OBJECT_NO_ACCESS), f"Expected {err} to match {OBJECT_NO_ACCESS}"
|
assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
@ -130,7 +126,7 @@ def can_get_head_object(
|
||||||
timeout=timeout,
|
timeout=timeout,
|
||||||
)
|
)
|
||||||
except OPERATION_ERROR_TYPE as err:
|
except OPERATION_ERROR_TYPE as err:
|
||||||
assert string_utils.is_str_match_pattern(err, OBJECT_NO_ACCESS), f"Expected {err} to match {OBJECT_NO_ACCESS}"
|
assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
@ -159,7 +155,7 @@ def can_get_range_of_object(
|
||||||
timeout=timeout,
|
timeout=timeout,
|
||||||
)
|
)
|
||||||
except OPERATION_ERROR_TYPE as err:
|
except OPERATION_ERROR_TYPE as err:
|
||||||
assert string_utils.is_str_match_pattern(err, OBJECT_NO_ACCESS), f"Expected {err} to match {OBJECT_NO_ACCESS}"
|
assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
@ -188,7 +184,7 @@ def can_get_range_hash_of_object(
|
||||||
timeout=timeout,
|
timeout=timeout,
|
||||||
)
|
)
|
||||||
except OPERATION_ERROR_TYPE as err:
|
except OPERATION_ERROR_TYPE as err:
|
||||||
assert string_utils.is_str_match_pattern(err, OBJECT_NO_ACCESS), f"Expected {err} to match {OBJECT_NO_ACCESS}"
|
assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
@ -215,7 +211,7 @@ def can_search_object(
|
||||||
timeout=timeout,
|
timeout=timeout,
|
||||||
)
|
)
|
||||||
except OPERATION_ERROR_TYPE as err:
|
except OPERATION_ERROR_TYPE as err:
|
||||||
assert string_utils.is_str_match_pattern(err, OBJECT_NO_ACCESS), f"Expected {err} to match {OBJECT_NO_ACCESS}"
|
assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
|
||||||
return False
|
return False
|
||||||
if oid:
|
if oid:
|
||||||
return oid in oids
|
return oid in oids
|
||||||
|
|
27
pytest_tests/helpers/policy_validation.py
Normal file
27
pytest_tests/helpers/policy_validation.py
Normal file
|
@ -0,0 +1,27 @@
|
||||||
|
from frostfs_testlib.shell.interfaces import Shell
|
||||||
|
from frostfs_testlib.steps.cli.container import get_container
|
||||||
|
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetmapInfo
|
||||||
|
|
||||||
|
from ..helpers.utility import placement_policy_from_container
|
||||||
|
|
||||||
|
|
||||||
|
def validate_object_policy(wallet: str, shell: Shell, placement_rule: str, cid: str, endpoint: str):
|
||||||
|
got_policy = placement_policy_from_container(get_container(wallet, cid, shell, endpoint, False))
|
||||||
|
assert got_policy.replace("'", "") == placement_rule.replace(
|
||||||
|
"'", ""
|
||||||
|
), f"Expected \n{placement_rule} and got policy \n{got_policy} are the same"
|
||||||
|
|
||||||
|
|
||||||
|
def get_netmap_param(netmap_info: list[NodeNetmapInfo]) -> dict:
|
||||||
|
dict_external = dict()
|
||||||
|
for node in netmap_info:
|
||||||
|
external_adress = node.external_address[0].split("/")[2]
|
||||||
|
dict_external[external_adress] = {
|
||||||
|
"country": node.country,
|
||||||
|
"country_code": node.country_code,
|
||||||
|
"Price": node.price,
|
||||||
|
"continent": node.continent,
|
||||||
|
"un_locode": node.un_locode,
|
||||||
|
"location": node.location,
|
||||||
|
}
|
||||||
|
return dict_external
|
|
@ -35,3 +35,16 @@ def wait_for_gc_pass_on_storage_nodes() -> None:
|
||||||
wait_time = datetime_utils.parse_time(STORAGE_GC_TIME)
|
wait_time = datetime_utils.parse_time(STORAGE_GC_TIME)
|
||||||
with reporter.step(f"Wait {wait_time}s until GC completes on storage nodes"):
|
with reporter.step(f"Wait {wait_time}s until GC completes on storage nodes"):
|
||||||
time.sleep(wait_time)
|
time.sleep(wait_time)
|
||||||
|
|
||||||
|
|
||||||
|
def are_numbers_similar(num1, num2, tolerance_percentage: float = 1.0):
|
||||||
|
"""
|
||||||
|
if difference of numbers is less than permissible deviation than numbers are similar
|
||||||
|
"""
|
||||||
|
# Calculate the permissible deviation
|
||||||
|
average = (num1 + num2) / 2
|
||||||
|
tolerance = average * (tolerance_percentage / 100)
|
||||||
|
|
||||||
|
# Calculate the real difference
|
||||||
|
difference = abs(num1 - num2)
|
||||||
|
return difference <= tolerance
|
||||||
|
|
|
@ -1,5 +1,8 @@
|
||||||
import os
|
import os
|
||||||
|
|
||||||
|
from .. import TESTS_BASE_PATH
|
||||||
|
|
||||||
TEST_CYCLES_COUNT = int(os.getenv("TEST_CYCLES_COUNT", "1"))
|
TEST_CYCLES_COUNT = int(os.getenv("TEST_CYCLES_COUNT", "1"))
|
||||||
|
|
||||||
DEVENV_PATH = os.getenv("DEVENV_PATH", os.path.join("..", "frostfs-dev-env"))
|
DEVENV_PATH = os.getenv("DEVENV_PATH", os.path.join("..", "frostfs-dev-env"))
|
||||||
|
S3_POLICY_FILE_LOCATION = os.path.join(TESTS_BASE_PATH, "resources/files/policy.json")
|
||||||
|
|
|
@ -1,123 +0,0 @@
|
||||||
import allure
|
|
||||||
import pytest
|
|
||||||
from frostfs_testlib import reporter
|
|
||||||
from frostfs_testlib.resources.wellknown_acl import PRIVATE_ACL_F, PUBLIC_ACL_F, READONLY_ACL_F
|
|
||||||
from frostfs_testlib.shell import Shell
|
|
||||||
from frostfs_testlib.steps.cli.container import create_container
|
|
||||||
from frostfs_testlib.steps.cli.object import put_object_to_random_node
|
|
||||||
from frostfs_testlib.storage.cluster import Cluster
|
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
|
||||||
|
|
||||||
from pytest_tests.helpers.container_access import assert_full_access_to_container, assert_no_access_to_container, assert_read_only_container
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.sanity
|
|
||||||
@pytest.mark.smoke
|
|
||||||
@pytest.mark.acl
|
|
||||||
class TestACLBasic(ClusterTestBase):
|
|
||||||
@pytest.fixture(scope="module")
|
|
||||||
def public_container(self, default_wallet: WalletInfo):
|
|
||||||
with reporter.step("Create public container"):
|
|
||||||
cid_public = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, basic_acl=PUBLIC_ACL_F)
|
|
||||||
|
|
||||||
return cid_public
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
|
||||||
def private_container(self, default_wallet: WalletInfo):
|
|
||||||
with reporter.step("Create private container"):
|
|
||||||
cid_private = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, basic_acl=PRIVATE_ACL_F)
|
|
||||||
|
|
||||||
return cid_private
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
|
||||||
def readonly_container(self, default_wallet: WalletInfo):
|
|
||||||
with reporter.step("Create public readonly container"):
|
|
||||||
cid_read_only = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, basic_acl=READONLY_ACL_F)
|
|
||||||
|
|
||||||
return cid_read_only
|
|
||||||
|
|
||||||
@allure.title("Operations in public container available to everyone (obj_size={object_size})")
|
|
||||||
def test_basic_acl_public(
|
|
||||||
self,
|
|
||||||
default_wallet: WalletInfo,
|
|
||||||
other_wallet: WalletInfo,
|
|
||||||
client_shell: Shell,
|
|
||||||
public_container: str,
|
|
||||||
file_path: str,
|
|
||||||
cluster: Cluster,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
Test access to object operations in public container.
|
|
||||||
"""
|
|
||||||
|
|
||||||
for wallet, role in ((default_wallet, "owner"), (other_wallet, "others")):
|
|
||||||
with reporter.step("Put objects to container"):
|
|
||||||
# We create new objects for each wallet because assert_full_access_to_container
|
|
||||||
# deletes the object
|
|
||||||
owner_object_oid = put_object_to_random_node(
|
|
||||||
default_wallet,
|
|
||||||
file_path,
|
|
||||||
public_container,
|
|
||||||
shell=self.shell,
|
|
||||||
cluster=self.cluster,
|
|
||||||
attributes={"created": "owner"},
|
|
||||||
)
|
|
||||||
other_object_oid = put_object_to_random_node(
|
|
||||||
other_wallet,
|
|
||||||
file_path,
|
|
||||||
public_container,
|
|
||||||
shell=self.shell,
|
|
||||||
cluster=self.cluster,
|
|
||||||
attributes={"created": "other"},
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step(f"Check {role} has full access to public container"):
|
|
||||||
assert_full_access_to_container(wallet, public_container, owner_object_oid, file_path, client_shell, cluster)
|
|
||||||
assert_full_access_to_container(wallet, public_container, other_object_oid, file_path, client_shell, cluster)
|
|
||||||
|
|
||||||
@allure.title("Operations in private container only available to owner (obj_size={object_size})")
|
|
||||||
def test_basic_acl_private(
|
|
||||||
self,
|
|
||||||
default_wallet: WalletInfo,
|
|
||||||
other_wallet: WalletInfo,
|
|
||||||
client_shell: Shell,
|
|
||||||
private_container: str,
|
|
||||||
file_path: str,
|
|
||||||
cluster: Cluster,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
Test access to object operations in private container.
|
|
||||||
"""
|
|
||||||
|
|
||||||
with reporter.step("Put object to container"):
|
|
||||||
owner_object_oid = put_object_to_random_node(default_wallet, file_path, private_container, client_shell, cluster)
|
|
||||||
|
|
||||||
with reporter.step("Check no one except owner has access to operations with container"):
|
|
||||||
assert_no_access_to_container(other_wallet, private_container, owner_object_oid, file_path, client_shell, cluster)
|
|
||||||
|
|
||||||
with reporter.step("Check owner has full access to private container"):
|
|
||||||
assert_full_access_to_container(default_wallet, private_container, owner_object_oid, file_path, self.shell, cluster)
|
|
||||||
|
|
||||||
@allure.title("Read operations in readonly container available to others (obj_size={object_size})")
|
|
||||||
def test_basic_acl_readonly(
|
|
||||||
self,
|
|
||||||
default_wallet: WalletInfo,
|
|
||||||
other_wallet: WalletInfo,
|
|
||||||
client_shell: Shell,
|
|
||||||
readonly_container: str,
|
|
||||||
file_path: str,
|
|
||||||
cluster: Cluster,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
Test access to object operations in readonly container.
|
|
||||||
"""
|
|
||||||
|
|
||||||
with reporter.step("Put object to container"):
|
|
||||||
object_oid = put_object_to_random_node(default_wallet, file_path, readonly_container, client_shell, cluster)
|
|
||||||
|
|
||||||
with reporter.step("Check others has read-only access to operations with container"):
|
|
||||||
assert_read_only_container(other_wallet, readonly_container, object_oid, file_path, client_shell, cluster)
|
|
||||||
|
|
||||||
with reporter.step("Check owner has full access to public container"):
|
|
||||||
assert_full_access_to_container(default_wallet, readonly_container, object_oid, file_path, client_shell, cluster)
|
|
|
@ -2,8 +2,6 @@ import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
|
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
|
||||||
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
|
|
||||||
from frostfs_testlib.steps.cli.container import create_container
|
|
||||||
from frostfs_testlib.steps.cli.object import put_object_to_random_node
|
from frostfs_testlib.steps.cli.object import put_object_to_random_node
|
||||||
from frostfs_testlib.steps.node_management import drop_object
|
from frostfs_testlib.steps.node_management import drop_object
|
||||||
from frostfs_testlib.storage.dataclasses import ape
|
from frostfs_testlib.storage.dataclasses import ape
|
||||||
|
@ -13,74 +11,69 @@ from frostfs_testlib.utils import wallet_utils
|
||||||
from frostfs_testlib.utils.failover_utils import wait_object_replication
|
from frostfs_testlib.utils.failover_utils import wait_object_replication
|
||||||
from frostfs_testlib.utils.file_utils import TestFile
|
from frostfs_testlib.utils.file_utils import TestFile
|
||||||
|
|
||||||
from pytest_tests.helpers.container_access import (
|
from ....helpers.container_access import (
|
||||||
ALL_OBJECT_OPERATIONS,
|
ALL_OBJECT_OPERATIONS,
|
||||||
assert_access_to_container,
|
assert_access_to_container,
|
||||||
assert_full_access_to_container,
|
assert_full_access_to_container,
|
||||||
assert_no_access_to_container,
|
assert_no_access_to_container,
|
||||||
)
|
)
|
||||||
|
from ....helpers.container_request import APE_EVERYONE_ALLOW_ALL, OWNER_ALLOW_ALL, ContainerRequest
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def denied_wallet(default_wallet: WalletInfo, other_wallet: WalletInfo, role: ape.Role) -> WalletInfo:
|
||||||
|
return other_wallet if role == ape.Role.OTHERS else default_wallet
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def allowed_wallet(default_wallet: WalletInfo, other_wallet: WalletInfo, role: ape.Role) -> WalletInfo:
|
||||||
|
return default_wallet if role == ape.Role.OTHERS else other_wallet
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.ape
|
@pytest.mark.ape
|
||||||
class TestApeContainer(ClusterTestBase):
|
class TestApeContainer(ClusterTestBase):
|
||||||
@pytest.fixture(scope="function")
|
|
||||||
def full_placement_container_with_object(self, default_wallet: WalletInfo, file_path: str) -> tuple[str, str, str]:
|
|
||||||
storage_nodes = self.cluster.storage_nodes
|
|
||||||
node_count = len(storage_nodes)
|
|
||||||
with reporter.step("Create public container with full placement rule"):
|
|
||||||
full_placement_rule = f"REP {node_count} IN X CBF 1 SELECT {node_count} FROM * AS X"
|
|
||||||
cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, full_placement_rule, PUBLIC_ACL)
|
|
||||||
|
|
||||||
with reporter.step("Put object to container"):
|
|
||||||
oid = put_object_to_random_node(default_wallet, file_path, cid, shell=self.shell, cluster=self.cluster)
|
|
||||||
wait_object_replication(cid, oid, node_count, shell=self.shell, nodes=storage_nodes)
|
|
||||||
|
|
||||||
yield cid, oid
|
|
||||||
|
|
||||||
@pytest.mark.sanity
|
@pytest.mark.sanity
|
||||||
@allure.title("Deny operations via APE by role (role={role}, obj_size={object_size})")
|
@allure.title("Deny operations via APE by role (role={role}, obj_size={object_size})")
|
||||||
@pytest.mark.parametrize("role", [ape.Role.OWNER, ape.Role.OTHERS], indirect=True)
|
@pytest.mark.parametrize("role", [ape.Role.OWNER, ape.Role.OTHERS], indirect=True)
|
||||||
def test_deny_operations_via_ape_by_role(
|
def test_deny_operations_via_ape_by_role(
|
||||||
self,
|
self,
|
||||||
default_wallet: WalletInfo,
|
denied_wallet: WalletInfo,
|
||||||
other_wallet: WalletInfo,
|
allowed_wallet: WalletInfo,
|
||||||
frostfs_cli: FrostfsCli,
|
frostfs_cli: FrostfsCli,
|
||||||
container_with_objects: tuple[str, list[str], str],
|
container: str,
|
||||||
|
objects: list[str],
|
||||||
role: ape.Role,
|
role: ape.Role,
|
||||||
file_path: TestFile,
|
file_path: TestFile,
|
||||||
|
rpc_endpoint: str,
|
||||||
):
|
):
|
||||||
denied_wallet = other_wallet if role == ape.Role.OTHERS else default_wallet
|
|
||||||
allowed_wallet = default_wallet if role == ape.Role.OTHERS else other_wallet
|
|
||||||
allowed_role = ape.Role.OWNER if role == ape.Role.OTHERS else ape.Role.OTHERS
|
|
||||||
cid, object_oids, file_path = container_with_objects
|
|
||||||
endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
role_condition = ape.Condition.by_role(role.value)
|
|
||||||
|
|
||||||
with reporter.step(f"Deny all operations for {role} via APE"):
|
with reporter.step(f"Deny all operations for {role} via APE"):
|
||||||
deny_rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, role_condition)
|
deny_rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, ape.Condition.by_role(role.value))
|
||||||
frostfs_cli.ape_manager.add(endpoint, deny_rule.chain_id, target_name=cid, target_type="container", rule=deny_rule.as_string())
|
frostfs_cli.ape_manager.add(
|
||||||
|
rpc_endpoint, deny_rule.chain_id, target_name=container, target_type="container", rule=deny_rule.as_string()
|
||||||
|
)
|
||||||
|
|
||||||
with reporter.step("Wait for one block"):
|
with reporter.step("Wait for one block"):
|
||||||
self.wait_for_blocks()
|
self.wait_for_blocks()
|
||||||
|
|
||||||
with reporter.step(f"Assert {role} have no access to public container"):
|
with reporter.step(f"Assert denied role have no access to public container"):
|
||||||
assert_no_access_to_container(denied_wallet, cid, object_oids[0], file_path, self.shell, self.cluster)
|
# access checks will try to remove object, so we use .pop() to ensure we have object before deletion
|
||||||
|
assert_no_access_to_container(denied_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
|
||||||
|
|
||||||
with reporter.step(f"Assert {allowed_role} have full access to public container"):
|
with reporter.step(f"Assert allowed role have full access to public container"):
|
||||||
assert_full_access_to_container(allowed_wallet, cid, object_oids.pop(), file_path, self.shell, self.cluster)
|
assert_full_access_to_container(allowed_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
|
||||||
|
|
||||||
with reporter.step(f"Remove deny rule from APE"):
|
with reporter.step(f"Remove deny rule from APE"):
|
||||||
frostfs_cli.ape_manager.remove(endpoint, deny_rule.chain_id, target_name=cid, target_type="container")
|
frostfs_cli.ape_manager.remove(rpc_endpoint, deny_rule.chain_id, target_name=container, target_type="container")
|
||||||
|
|
||||||
with reporter.step("Wait for one block"):
|
with reporter.step("Wait for one block"):
|
||||||
self.wait_for_blocks()
|
self.wait_for_blocks()
|
||||||
|
|
||||||
with reporter.step("Assert owner have full access to public container"):
|
with reporter.step("Assert allowed role have full access to public container"):
|
||||||
assert_full_access_to_container(default_wallet, cid, object_oids.pop(), file_path, self.shell, self.cluster)
|
assert_full_access_to_container(allowed_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
|
||||||
|
|
||||||
with reporter.step("Assert others have full access to public container"):
|
with reporter.step("Assert denied role have full access to public container"):
|
||||||
assert_full_access_to_container(other_wallet, cid, object_oids.pop(), file_path, self.shell, self.cluster)
|
assert_full_access_to_container(denied_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
|
||||||
|
|
||||||
@allure.title("Deny operations for others via APE excluding single pubkey (obj_size={object_size})")
|
@allure.title("Deny operations for others via APE excluding single pubkey (obj_size={object_size})")
|
||||||
def test_deny_opeartions_excluding_pubkey(
|
def test_deny_opeartions_excluding_pubkey(
|
||||||
|
@ -89,11 +82,11 @@ class TestApeContainer(ClusterTestBase):
|
||||||
default_wallet: WalletInfo,
|
default_wallet: WalletInfo,
|
||||||
other_wallet: WalletInfo,
|
other_wallet: WalletInfo,
|
||||||
other_wallet_2: WalletInfo,
|
other_wallet_2: WalletInfo,
|
||||||
container_with_objects: tuple[str, list[str], str],
|
container: str,
|
||||||
|
objects: list[str],
|
||||||
|
rpc_endpoint: str,
|
||||||
|
file_path: TestFile,
|
||||||
):
|
):
|
||||||
endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
cid, object_oids, file_path = container_with_objects
|
|
||||||
|
|
||||||
with reporter.step("Add deny APE rules for others except single wallet"):
|
with reporter.step("Add deny APE rules for others except single wallet"):
|
||||||
rule_conditions = [
|
rule_conditions = [
|
||||||
ape.Condition.by_role(ape.Role.OTHERS),
|
ape.Condition.by_role(ape.Role.OTHERS),
|
||||||
|
@ -103,29 +96,40 @@ class TestApeContainer(ClusterTestBase):
|
||||||
),
|
),
|
||||||
]
|
]
|
||||||
rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, rule_conditions)
|
rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, rule_conditions)
|
||||||
frostfs_cli.ape_manager.add(endpoint, rule.chain_id, target_name=cid, target_type="container", rule=rule.as_string())
|
frostfs_cli.ape_manager.add(rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string())
|
||||||
|
|
||||||
with reporter.step("Wait for one block"):
|
with reporter.step("Wait for one block"):
|
||||||
self.wait_for_blocks()
|
self.wait_for_blocks()
|
||||||
|
|
||||||
with reporter.step("Assert others have no access to public container"):
|
with reporter.step("Assert others have no access to public container"):
|
||||||
assert_no_access_to_container(other_wallet, cid, object_oids[0], file_path, self.shell, self.cluster)
|
# access checks will try to remove object, so we use .pop() to ensure we have object before deletion
|
||||||
|
assert_no_access_to_container(other_wallet, container, objects[0], file_path, self.shell, self.cluster)
|
||||||
|
|
||||||
with reporter.step("Assert owner have full access to public container"):
|
with reporter.step("Assert owner have full access to public container"):
|
||||||
assert_full_access_to_container(default_wallet, cid, object_oids.pop(), file_path, self.shell, self.cluster)
|
assert_full_access_to_container(default_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
|
||||||
|
|
||||||
with reporter.step("Assert allowed wallet have full access to public container"):
|
with reporter.step("Assert allowed wallet have full access to public container"):
|
||||||
assert_full_access_to_container(other_wallet_2, cid, object_oids.pop(), file_path, self.shell, self.cluster)
|
assert_full_access_to_container(other_wallet_2, container, objects.pop(), file_path, self.shell, self.cluster)
|
||||||
|
|
||||||
@allure.title("Replication works with APE deny rules on OWNER and OTHERS (obj_size={object_size})")
|
@allure.title("Replication works with APE deny rules on OWNER and OTHERS (obj_size={object_size})")
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"container_request",
|
||||||
|
[ContainerRequest(f"REP %NODE_COUNT% IN X CBF 1 SELECT %NODE_COUNT% FROM * AS X", APE_EVERYONE_ALLOW_ALL, "custom")],
|
||||||
|
indirect=True,
|
||||||
|
)
|
||||||
def test_replication_works_with_deny_rules(
|
def test_replication_works_with_deny_rules(
|
||||||
self,
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
frostfs_cli: FrostfsCli,
|
frostfs_cli: FrostfsCli,
|
||||||
full_placement_container_with_object: tuple[str, list[str], str],
|
container: str,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
file_path: TestFile,
|
||||||
):
|
):
|
||||||
endpoint = self.cluster.default_rpc_endpoint
|
with reporter.step("Put object to container"):
|
||||||
cid, oid = full_placement_container_with_object
|
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
|
||||||
storage_nodes = self.cluster.storage_nodes
|
|
||||||
|
with reporter.step("Wait for object replication after upload"):
|
||||||
|
wait_object_replication(container, oid, len(self.cluster.cluster_nodes), self.shell, self.cluster.storage_nodes)
|
||||||
|
|
||||||
with reporter.step("Add deny APE rules for owner and others"):
|
with reporter.step("Add deny APE rules for owner and others"):
|
||||||
rule_conditions = [
|
rule_conditions = [
|
||||||
|
@ -134,93 +138,96 @@ class TestApeContainer(ClusterTestBase):
|
||||||
]
|
]
|
||||||
for rule_condition in rule_conditions:
|
for rule_condition in rule_conditions:
|
||||||
rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, rule_condition)
|
rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, rule_condition)
|
||||||
frostfs_cli.ape_manager.add(endpoint, rule.chain_id, target_name=cid, target_type="container", rule=rule.as_string())
|
frostfs_cli.ape_manager.add(
|
||||||
|
rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string()
|
||||||
|
)
|
||||||
|
|
||||||
with reporter.step("Wait for one block"):
|
with reporter.step("Wait for one block"):
|
||||||
self.wait_for_blocks()
|
self.wait_for_blocks()
|
||||||
|
|
||||||
with reporter.step("Drop object"):
|
with reporter.step("Drop object"):
|
||||||
drop_object(storage_nodes[0], cid, oid)
|
drop_object(self.cluster.storage_nodes[0], container, oid)
|
||||||
|
|
||||||
with reporter.step("Wait for dropped object to be replicated"):
|
with reporter.step("Wait for dropped object to be replicated"):
|
||||||
wait_object_replication(cid, oid, len(storage_nodes), self.shell, storage_nodes)
|
wait_object_replication(container, oid, len(self.cluster.storage_nodes), self.shell, self.cluster.storage_nodes)
|
||||||
|
|
||||||
@allure.title("Deny operations via APE by role (role=ir, obj_size={object_size})")
|
@allure.title("Deny operations via APE by role (role=ir, obj_size={object_size})")
|
||||||
|
@pytest.mark.parametrize("container_request", [OWNER_ALLOW_ALL], indirect=True)
|
||||||
def test_deny_operations_via_ape_by_role_ir(
|
def test_deny_operations_via_ape_by_role_ir(
|
||||||
self, frostfs_cli: FrostfsCli, ir_wallet: WalletInfo, container_with_objects: tuple[str, list[str], str]
|
self, frostfs_cli: FrostfsCli, ir_wallet: WalletInfo, container: str, objects: list[str], rpc_endpoint: str, file_path: TestFile
|
||||||
):
|
):
|
||||||
cid, object_oids, file_path = container_with_objects
|
|
||||||
endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
default_ir_access = {
|
default_ir_access = {
|
||||||
ape.ObjectOperations.PUT: False,
|
ape.ObjectOperations.PUT: False,
|
||||||
ape.ObjectOperations.GET: True,
|
ape.ObjectOperations.GET: True,
|
||||||
ape.ObjectOperations.HEAD: True,
|
ape.ObjectOperations.HEAD: True,
|
||||||
ape.ObjectOperations.GET_RANGE: False,
|
ape.ObjectOperations.GET_RANGE: True,
|
||||||
ape.ObjectOperations.GET_RANGE_HASH: True,
|
ape.ObjectOperations.GET_RANGE_HASH: True,
|
||||||
ape.ObjectOperations.SEARCH: True,
|
ape.ObjectOperations.SEARCH: True,
|
||||||
ape.ObjectOperations.DELETE: False,
|
ape.ObjectOperations.DELETE: False,
|
||||||
}
|
}
|
||||||
|
|
||||||
with reporter.step("Assert IR wallet access in default state"):
|
with reporter.step("Assert IR wallet access in default state"):
|
||||||
assert_access_to_container(default_ir_access, ir_wallet, cid, object_oids[0], file_path, self.shell, self.cluster)
|
assert_access_to_container(default_ir_access, ir_wallet, container, objects[0], file_path, self.shell, self.cluster)
|
||||||
|
|
||||||
with reporter.step("Add deny APE rule with deny all operations for IR role"):
|
with reporter.step("Add deny APE rule with deny all operations for IR role"):
|
||||||
rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, [ape.Condition.by_role(ape.Role.IR.value)])
|
rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, [ape.Condition.by_role(ape.Role.IR.value)])
|
||||||
frostfs_cli.ape_manager.add(endpoint, rule.chain_id, target_name=cid, target_type="container", rule=rule.as_string())
|
frostfs_cli.ape_manager.add(rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string())
|
||||||
|
|
||||||
with reporter.step("Wait for one block"):
|
with reporter.step("Wait for one block"):
|
||||||
self.wait_for_blocks()
|
self.wait_for_blocks()
|
||||||
|
|
||||||
with reporter.step("Assert IR wallet ignores APE rules"):
|
with reporter.step("Assert IR wallet ignores APE rules"):
|
||||||
assert_access_to_container(default_ir_access, ir_wallet, cid, object_oids[0], file_path, self.shell, self.cluster)
|
assert_access_to_container(default_ir_access, ir_wallet, container, objects[0], file_path, self.shell, self.cluster)
|
||||||
|
|
||||||
with reporter.step("Remove APE rule"):
|
with reporter.step("Remove APE rule"):
|
||||||
frostfs_cli.ape_manager.remove(endpoint, rule.chain_id, target_name=cid, target_type="container")
|
frostfs_cli.ape_manager.remove(rpc_endpoint, rule.chain_id, target_name=container, target_type="container")
|
||||||
|
|
||||||
with reporter.step("Wait for one block"):
|
with reporter.step("Wait for one block"):
|
||||||
self.wait_for_blocks()
|
self.wait_for_blocks()
|
||||||
|
|
||||||
with reporter.step("Assert IR wallet access is restored"):
|
with reporter.step("Assert IR wallet access is restored"):
|
||||||
assert_access_to_container(default_ir_access, ir_wallet, cid, object_oids[0], file_path, self.shell, self.cluster)
|
assert_access_to_container(default_ir_access, ir_wallet, container, objects[0], file_path, self.shell, self.cluster)
|
||||||
|
|
||||||
@allure.title("Deny operations via APE by role (role=container, obj_size={object_size})")
|
@allure.title("Deny operations via APE by role (role=container, obj_size={object_size})")
|
||||||
|
@pytest.mark.parametrize("container_request", [OWNER_ALLOW_ALL], indirect=True)
|
||||||
def test_deny_operations_via_ape_by_role_container(
|
def test_deny_operations_via_ape_by_role_container(
|
||||||
self, frostfs_cli: FrostfsCli, storage_wallet: WalletInfo, container_with_objects: tuple[str, list[str], str]
|
self,
|
||||||
|
frostfs_cli: FrostfsCli,
|
||||||
|
container_node_wallet: WalletInfo,
|
||||||
|
container: str,
|
||||||
|
objects: list[str],
|
||||||
|
rpc_endpoint: str,
|
||||||
|
file_path: TestFile,
|
||||||
):
|
):
|
||||||
|
access_matrix = {
|
||||||
cid, object_oids, file_path = container_with_objects
|
|
||||||
endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
default_container_access = {
|
|
||||||
ape.ObjectOperations.PUT: True,
|
ape.ObjectOperations.PUT: True,
|
||||||
ape.ObjectOperations.GET: True,
|
ape.ObjectOperations.GET: True,
|
||||||
ape.ObjectOperations.HEAD: True,
|
ape.ObjectOperations.HEAD: True,
|
||||||
ape.ObjectOperations.GET_RANGE: False,
|
ape.ObjectOperations.GET_RANGE: True,
|
||||||
ape.ObjectOperations.GET_RANGE_HASH: True,
|
ape.ObjectOperations.GET_RANGE_HASH: True,
|
||||||
ape.ObjectOperations.SEARCH: True,
|
ape.ObjectOperations.SEARCH: True,
|
||||||
ape.ObjectOperations.DELETE: False,
|
ape.ObjectOperations.DELETE: True,
|
||||||
}
|
}
|
||||||
|
|
||||||
with reporter.step("Assert CONTAINER wallet access in default state"):
|
with reporter.step("Assert CONTAINER wallet access in default state"):
|
||||||
assert_access_to_container(default_container_access, storage_wallet, cid, object_oids[0], file_path, self.shell, self.cluster)
|
assert_access_to_container(access_matrix, container_node_wallet, container, objects[0], file_path, self.shell, self.cluster)
|
||||||
|
|
||||||
rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, ape.Condition.by_role(ape.Role.CONTAINER.value))
|
rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, ape.Condition.by_role(ape.Role.CONTAINER.value))
|
||||||
|
|
||||||
with reporter.step(f"Add APE rule with deny all operations for CONTAINER and IR roles"):
|
with reporter.step(f"Add APE rule with deny all operations for CONTAINER and IR roles"):
|
||||||
frostfs_cli.ape_manager.add(endpoint, rule.chain_id, target_name=cid, target_type="container", rule=rule.as_string())
|
frostfs_cli.ape_manager.add(rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string())
|
||||||
|
|
||||||
with reporter.step("Wait for one block"):
|
with reporter.step("Wait for one block"):
|
||||||
self.wait_for_blocks()
|
self.wait_for_blocks()
|
||||||
|
|
||||||
with reporter.step("Assert CONTAINER wallet ignores APE rule"):
|
with reporter.step("Assert CONTAINER wallet ignores APE rule"):
|
||||||
assert_access_to_container(default_container_access, storage_wallet, cid, object_oids[0], file_path, self.shell, self.cluster)
|
assert_access_to_container(access_matrix, container_node_wallet, container, objects[1], file_path, self.shell, self.cluster)
|
||||||
|
|
||||||
with reporter.step("Remove APE rules"):
|
with reporter.step("Remove APE rule"):
|
||||||
frostfs_cli.ape_manager.remove(endpoint, rule.chain_id, target_name=cid, target_type="container")
|
frostfs_cli.ape_manager.remove(rpc_endpoint, rule.chain_id, target_name=container, target_type="container")
|
||||||
|
|
||||||
with reporter.step("Wait for one block"):
|
with reporter.step("Wait for one block"):
|
||||||
self.wait_for_blocks()
|
self.wait_for_blocks()
|
||||||
|
|
||||||
with reporter.step("Assert CONTAINER wallet access is restored"):
|
with reporter.step("Assert CONTAINER wallet access after rule was removed"):
|
||||||
assert_access_to_container(default_container_access, storage_wallet, cid, object_oids[0], file_path, self.shell, self.cluster)
|
assert_access_to_container(access_matrix, container_node_wallet, container, objects[2], file_path, self.shell, self.cluster)
|
||||||
|
|
|
@ -2,28 +2,27 @@ import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
|
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
|
||||||
from frostfs_testlib.resources.error_patterns import OBJECT_ACCESS_DENIED, OBJECT_NOT_FOUND
|
from frostfs_testlib.resources.error_patterns import OBJECT_ACCESS_DENIED
|
||||||
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
|
|
||||||
from frostfs_testlib.steps.cli.container import create_container
|
|
||||||
from frostfs_testlib.steps.cli.object import get_object_from_random_node, head_object, put_object_to_random_node
|
from frostfs_testlib.steps.cli.object import get_object_from_random_node, head_object, put_object_to_random_node
|
||||||
from frostfs_testlib.storage.cluster import Cluster
|
|
||||||
from frostfs_testlib.storage.dataclasses import ape
|
from frostfs_testlib.storage.dataclasses import ape
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.testing.test_control import expect_not_raises
|
from frostfs_testlib.testing.test_control import expect_not_raises
|
||||||
from frostfs_testlib.utils.file_utils import TestFile
|
from frostfs_testlib.utils.file_utils import TestFile
|
||||||
|
|
||||||
from pytest_tests.helpers.bearer_token import create_bearer_token
|
from ....helpers.bearer_token import create_bearer_token
|
||||||
from pytest_tests.helpers.container_access import (
|
from ....helpers.container_access import (
|
||||||
ALL_OBJECT_OPERATIONS,
|
ALL_OBJECT_OPERATIONS,
|
||||||
FULL_ACCESS,
|
FULL_ACCESS,
|
||||||
assert_access_to_container,
|
assert_access_to_container,
|
||||||
assert_full_access_to_container,
|
assert_full_access_to_container,
|
||||||
assert_no_access_to_container,
|
assert_no_access_to_container,
|
||||||
)
|
)
|
||||||
from pytest_tests.helpers.object_access import OBJECT_NO_ACCESS
|
from ....helpers.container_request import OWNER_ALLOW_ALL
|
||||||
|
from ....helpers.object_access import OBJECT_ACCESS_DENIED
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.ape
|
@pytest.mark.ape
|
||||||
class TestApeFilters(ClusterTestBase):
|
class TestApeFilters(ClusterTestBase):
|
||||||
# SPEC: https://github.com/nspcc-dev/neofs-spec/blob/master/01-arch/07-acl.md
|
# SPEC: https://github.com/nspcc-dev/neofs-spec/blob/master/01-arch/07-acl.md
|
||||||
|
@ -47,61 +46,27 @@ class TestApeFilters(ClusterTestBase):
|
||||||
ape.ObjectOperations.PUT,
|
ape.ObjectOperations.PUT,
|
||||||
]
|
]
|
||||||
|
|
||||||
@pytest.fixture(scope="function")
|
@pytest.fixture
|
||||||
def public_container_with_objects(self, default_wallet: WalletInfo, file_path: TestFile):
|
def objects_with_attributes(self, default_wallet: WalletInfo, file_path: TestFile, container: str):
|
||||||
with reporter.step("Create public container"):
|
return [
|
||||||
cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, basic_acl=PUBLIC_ACL)
|
put_object_to_random_node(
|
||||||
|
default_wallet, file_path, container, self.shell, self.cluster, attributes={**self.ATTRIBUTES, "key": val}
|
||||||
objects_with_header, objects_with_other_header, objects_without_header = self._fill_container(default_wallet, file_path, cid)
|
|
||||||
|
|
||||||
return cid, objects_with_header, objects_with_other_header, objects_without_header, file_path
|
|
||||||
|
|
||||||
@pytest.fixture(scope="function")
|
|
||||||
def private_container(self, default_wallet: WalletInfo, frostfs_cli: FrostfsCli, cluster: Cluster):
|
|
||||||
with reporter.step("Create private container"):
|
|
||||||
cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, basic_acl="0")
|
|
||||||
|
|
||||||
with reporter.step("Create allow APE rule for container owner"):
|
|
||||||
role_condition = ape.Condition.by_role(ape.Role.OWNER)
|
|
||||||
deny_rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL, role_condition)
|
|
||||||
|
|
||||||
frostfs_cli.ape_manager.add(
|
|
||||||
cluster.default_rpc_endpoint,
|
|
||||||
deny_rule.chain_id,
|
|
||||||
target_name=cid,
|
|
||||||
target_type="container",
|
|
||||||
rule=deny_rule.as_string(),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Wait for one block"):
|
|
||||||
self.wait_for_blocks()
|
|
||||||
|
|
||||||
return cid
|
|
||||||
|
|
||||||
@pytest.fixture(scope="function")
|
|
||||||
def container_with_objects(self, private_container: str, default_wallet: WalletInfo, file_path: TestFile):
|
|
||||||
objects_with_header, objects_with_other_header, objects_without_header = self._fill_container(
|
|
||||||
default_wallet, file_path, private_container
|
|
||||||
)
|
|
||||||
return private_container, objects_with_header, objects_with_other_header, objects_without_header, file_path
|
|
||||||
|
|
||||||
@reporter.step("Add objects to container")
|
|
||||||
def _fill_container(self, wallet: WalletInfo, test_file: TestFile, cid: str):
|
|
||||||
objects_with_header = [
|
|
||||||
put_object_to_random_node(wallet, test_file, cid, self.shell, self.cluster, attributes={**self.ATTRIBUTES, "key": val})
|
|
||||||
for val in range(self.OBJECT_COUNT)
|
for val in range(self.OBJECT_COUNT)
|
||||||
]
|
]
|
||||||
|
|
||||||
objects_with_other_header = [
|
@pytest.fixture
|
||||||
put_object_to_random_node(wallet, test_file, cid, self.shell, self.cluster, attributes={**self.OTHER_ATTRIBUTES, "key": val})
|
def objects_with_other_attributes(self, default_wallet: WalletInfo, file_path: TestFile, container: str):
|
||||||
|
return [
|
||||||
|
put_object_to_random_node(
|
||||||
|
default_wallet, file_path, container, self.shell, self.cluster, attributes={**self.OTHER_ATTRIBUTES, "key": val}
|
||||||
|
)
|
||||||
for val in range(self.OBJECT_COUNT)
|
for val in range(self.OBJECT_COUNT)
|
||||||
]
|
]
|
||||||
|
|
||||||
objects_without_header = [
|
@pytest.fixture
|
||||||
put_object_to_random_node(wallet, test_file, cid, self.shell, self.cluster) for _ in range(self.OBJECT_COUNT)
|
def objects_without_attributes(self, default_wallet: WalletInfo, file_path: TestFile, container: str):
|
||||||
]
|
return [put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster) for _ in range(self.OBJECT_COUNT)]
|
||||||
|
|
||||||
return objects_with_header, objects_with_other_header, objects_without_header
|
|
||||||
|
|
||||||
@pytest.mark.sanity
|
@pytest.mark.sanity
|
||||||
@allure.title("Operations with request filter (match_type={match_type}, obj_size={object_size})")
|
@allure.title("Operations with request filter (match_type={match_type}, obj_size={object_size})")
|
||||||
|
@ -112,24 +77,22 @@ class TestApeFilters(ClusterTestBase):
|
||||||
frostfs_cli: FrostfsCli,
|
frostfs_cli: FrostfsCli,
|
||||||
temp_directory: str,
|
temp_directory: str,
|
||||||
other_wallet: WalletInfo,
|
other_wallet: WalletInfo,
|
||||||
public_container_with_objects: tuple[str, list[str], str],
|
container: str,
|
||||||
|
objects_with_attributes: list[str],
|
||||||
|
objects_with_other_attributes: list[str],
|
||||||
|
objects_without_attributes: list[str],
|
||||||
match_type: ape.MatchType,
|
match_type: ape.MatchType,
|
||||||
|
file_path: TestFile,
|
||||||
|
rpc_endpoint: str,
|
||||||
):
|
):
|
||||||
(
|
|
||||||
cid,
|
|
||||||
objects_with_header,
|
|
||||||
objects_with_other_header,
|
|
||||||
objects_without_header,
|
|
||||||
file_path,
|
|
||||||
) = public_container_with_objects
|
|
||||||
endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
with reporter.step("Deny all operations for others via APE with request condition"):
|
with reporter.step("Deny all operations for others via APE with request condition"):
|
||||||
request_condition = ape.Condition('"frostfs:xheader/check_key"', '"check_value"', ape.ConditionType.REQUEST, match_type)
|
request_condition = ape.Condition('"frostfs:xheader/check_key"', '"check_value"', ape.ConditionType.REQUEST, match_type)
|
||||||
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
||||||
deny_rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, [request_condition, role_condition])
|
deny_rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, [request_condition, role_condition])
|
||||||
|
|
||||||
frostfs_cli.ape_manager.add(endpoint, deny_rule.chain_id, target_name=cid, target_type="container", rule=deny_rule.as_string())
|
frostfs_cli.ape_manager.add(
|
||||||
|
rpc_endpoint, deny_rule.chain_id, target_name=container, target_type="container", rule=deny_rule.as_string()
|
||||||
|
)
|
||||||
|
|
||||||
with reporter.step("Wait for one block"):
|
with reporter.step("Wait for one block"):
|
||||||
self.wait_for_blocks()
|
self.wait_for_blocks()
|
||||||
|
@ -137,7 +100,7 @@ class TestApeFilters(ClusterTestBase):
|
||||||
with reporter.step("Create bearer token with everything allowed for others role"):
|
with reporter.step("Create bearer token with everything allowed for others role"):
|
||||||
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
||||||
rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, role_condition)
|
rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, role_condition)
|
||||||
bearer = create_bearer_token(frostfs_cli, temp_directory, cid, rule, endpoint)
|
bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, rpc_endpoint)
|
||||||
|
|
||||||
# Filter denies requests where "check_key {match_type} ATTRIBUTE", so when match_type
|
# Filter denies requests where "check_key {match_type} ATTRIBUTE", so when match_type
|
||||||
# is STRING_EQUAL, then requests with "check_key=OTHER_ATTRIBUTE" will be allowed while
|
# is STRING_EQUAL, then requests with "check_key=OTHER_ATTRIBUTE" will be allowed while
|
||||||
|
@ -147,18 +110,22 @@ class TestApeFilters(ClusterTestBase):
|
||||||
|
|
||||||
# We test on 3 groups of objects with various headers,
|
# We test on 3 groups of objects with various headers,
|
||||||
# but APE rule should ignore object headers and only work based on request headers
|
# but APE rule should ignore object headers and only work based on request headers
|
||||||
for oids in [objects_with_header, objects_with_other_header, objects_without_header]:
|
for oids in [objects_with_attributes, objects_with_other_attributes, objects_without_attributes]:
|
||||||
with reporter.step("Check others has full access when sending request without headers"):
|
with reporter.step("Check others has full access when sending request without headers"):
|
||||||
assert_full_access_to_container(other_wallet, cid, oids.pop(), file_path, self.shell, self.cluster)
|
assert_full_access_to_container(other_wallet, container, oids.pop(), file_path, self.shell, self.cluster)
|
||||||
|
|
||||||
with reporter.step("Check others has full access when sending request with allowed headers"):
|
with reporter.step("Check others has full access when sending request with allowed headers"):
|
||||||
assert_full_access_to_container(other_wallet, cid, oids.pop(), file_path, self.shell, self.cluster, xhdr=allow_headers)
|
assert_full_access_to_container(
|
||||||
|
other_wallet, container, oids.pop(), file_path, self.shell, self.cluster, xhdr=allow_headers
|
||||||
|
)
|
||||||
|
|
||||||
with reporter.step("Check others has no access when sending request with denied headers"):
|
with reporter.step("Check others has no access when sending request with denied headers"):
|
||||||
assert_no_access_to_container(other_wallet, cid, oids.pop(), file_path, self.shell, self.cluster, xhdr=deny_headers)
|
assert_no_access_to_container(other_wallet, container, oids.pop(), file_path, self.shell, self.cluster, xhdr=deny_headers)
|
||||||
|
|
||||||
with reporter.step("Check others has full access when sending request with denied headers and using bearer token"):
|
with reporter.step("Check others has full access when sending request with denied headers and using bearer token"):
|
||||||
assert_full_access_to_container(other_wallet, cid, oids.pop(), file_path, self.shell, self.cluster, bearer, deny_headers)
|
assert_full_access_to_container(
|
||||||
|
other_wallet, container, oids.pop(), file_path, self.shell, self.cluster, bearer, deny_headers
|
||||||
|
)
|
||||||
|
|
||||||
@allure.title("Operations with deny user headers filter (match_type={match_type}, obj_size={object_size})")
|
@allure.title("Operations with deny user headers filter (match_type={match_type}, obj_size={object_size})")
|
||||||
@pytest.mark.parametrize("match_type", [ape.MatchType.EQUAL, ape.MatchType.NOT_EQUAL])
|
@pytest.mark.parametrize("match_type", [ape.MatchType.EQUAL, ape.MatchType.NOT_EQUAL])
|
||||||
|
@ -168,19 +135,14 @@ class TestApeFilters(ClusterTestBase):
|
||||||
frostfs_cli: FrostfsCli,
|
frostfs_cli: FrostfsCli,
|
||||||
temp_directory: str,
|
temp_directory: str,
|
||||||
other_wallet: WalletInfo,
|
other_wallet: WalletInfo,
|
||||||
public_container_with_objects: tuple[str, list[str], str],
|
container: str,
|
||||||
|
objects_with_attributes: list[str],
|
||||||
|
objects_with_other_attributes: list[str],
|
||||||
|
objects_without_attributes: list[str],
|
||||||
match_type: ape.MatchType,
|
match_type: ape.MatchType,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
file_path: TestFile,
|
||||||
):
|
):
|
||||||
# TODO: Refactor this to be fixtures, not test logic
|
|
||||||
(
|
|
||||||
cid,
|
|
||||||
objects_with_attributes,
|
|
||||||
objects_with_other_attributes,
|
|
||||||
objs_without_attributes,
|
|
||||||
file_path,
|
|
||||||
) = public_container_with_objects
|
|
||||||
endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
allow_objects = objects_with_other_attributes if match_type == ape.MatchType.EQUAL else objects_with_attributes
|
allow_objects = objects_with_other_attributes if match_type == ape.MatchType.EQUAL else objects_with_attributes
|
||||||
deny_objects = objects_with_attributes if match_type == ape.MatchType.EQUAL else objects_with_other_attributes
|
deny_objects = objects_with_attributes if match_type == ape.MatchType.EQUAL else objects_with_other_attributes
|
||||||
|
|
||||||
|
@ -210,15 +172,15 @@ class TestApeFilters(ClusterTestBase):
|
||||||
ape.ObjectOperations.DELETE: False, # Because delete needs to put a tombstone without attributes
|
ape.ObjectOperations.DELETE: False, # Because delete needs to put a tombstone without attributes
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
# End of refactor
|
|
||||||
|
|
||||||
with reporter.step("Deny operations for others via APE with resource condition"):
|
with reporter.step("Deny operations for others via APE with resource condition"):
|
||||||
resource_condition = ape.Condition('"check_key"', '"check_value"', ape.ConditionType.RESOURCE, match_type)
|
resource_condition = ape.Condition('"check_key"', '"check_value"', ape.ConditionType.RESOURCE, match_type)
|
||||||
not_a_tombstone_condition = ape.Condition.by_object_type("TOMBSTONE", ape.ConditionType.RESOURCE, ape.MatchType.NOT_EQUAL)
|
|
||||||
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
||||||
deny_rule = ape.Rule(ape.Verb.DENY, self.RESOURCE_OPERATIONS, [resource_condition, role_condition])
|
deny_rule = ape.Rule(ape.Verb.DENY, self.RESOURCE_OPERATIONS, [resource_condition, role_condition])
|
||||||
|
|
||||||
frostfs_cli.ape_manager.add(endpoint, deny_rule.chain_id, target_name=cid, target_type="container", rule=deny_rule.as_string())
|
frostfs_cli.ape_manager.add(
|
||||||
|
rpc_endpoint, deny_rule.chain_id, target_name=container, target_type="container", rule=deny_rule.as_string()
|
||||||
|
)
|
||||||
|
|
||||||
with reporter.step("Wait for one block"):
|
with reporter.step("Wait for one block"):
|
||||||
self.wait_for_blocks()
|
self.wait_for_blocks()
|
||||||
|
@ -226,12 +188,12 @@ class TestApeFilters(ClusterTestBase):
|
||||||
with reporter.step("Create bearer token with everything allowed for others role"):
|
with reporter.step("Create bearer token with everything allowed for others role"):
|
||||||
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
||||||
rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, role_condition)
|
rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, role_condition)
|
||||||
bearer = create_bearer_token(frostfs_cli, temp_directory, cid, rule, endpoint)
|
bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, rpc_endpoint)
|
||||||
|
|
||||||
with reporter.step("Create bearer token with allowed put for others role"):
|
with reporter.step("Create bearer token with allowed put for others role"):
|
||||||
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
||||||
rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.PUT, role_condition)
|
rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.PUT, role_condition)
|
||||||
bearer_put = create_bearer_token(frostfs_cli, temp_directory, cid, rule, endpoint)
|
bearer_put = create_bearer_token(frostfs_cli, temp_directory, container, rule, rpc_endpoint)
|
||||||
|
|
||||||
# We will attempt requests with various headers,
|
# We will attempt requests with various headers,
|
||||||
# but APE rule should ignore request headers and validate only object headers
|
# but APE rule should ignore request headers and validate only object headers
|
||||||
|
@ -240,8 +202,8 @@ class TestApeFilters(ClusterTestBase):
|
||||||
assert_access_to_container(
|
assert_access_to_container(
|
||||||
no_attributes_access[match_type],
|
no_attributes_access[match_type],
|
||||||
other_wallet,
|
other_wallet,
|
||||||
cid,
|
container,
|
||||||
objs_without_attributes.pop(),
|
objects_without_attributes.pop(),
|
||||||
file_path,
|
file_path,
|
||||||
self.shell,
|
self.shell,
|
||||||
self.cluster,
|
self.cluster,
|
||||||
|
@ -250,72 +212,70 @@ class TestApeFilters(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("Check others have full access to objects without deny attribute"):
|
with reporter.step("Check others have full access to objects without deny attribute"):
|
||||||
assert_access_to_container(
|
assert_access_to_container(
|
||||||
allowed_access[match_type], other_wallet, cid, allow_objects.pop(), file_path, self.shell, self.cluster, xhdr=xhdr
|
allowed_access[match_type], other_wallet, container, allow_objects.pop(), file_path, self.shell, self.cluster, xhdr=xhdr
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Check others have no access to objects with deny attribute"):
|
with reporter.step("Check others have no access to objects with deny attribute"):
|
||||||
with pytest.raises(Exception, match=OBJECT_NO_ACCESS):
|
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
|
||||||
head_object(other_wallet, cid, deny_objects[0], self.shell, endpoint, xhdr=xhdr)
|
head_object(other_wallet, container, deny_objects[0], self.shell, rpc_endpoint, xhdr=xhdr)
|
||||||
|
|
||||||
with pytest.raises(Exception, match=OBJECT_NO_ACCESS):
|
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
|
||||||
get_object_from_random_node(other_wallet, cid, deny_objects[0], self.shell, self.cluster, xhdr=xhdr)
|
get_object_from_random_node(other_wallet, container, deny_objects[0], self.shell, self.cluster, xhdr=xhdr)
|
||||||
|
|
||||||
with reporter.step("Check others have access to objects with deny attribute and using bearer token"):
|
with reporter.step("Check others have access to objects with deny attribute and using bearer token"):
|
||||||
assert_full_access_to_container(other_wallet, cid, deny_objects.pop(), file_path, self.shell, self.cluster, bearer, xhdr)
|
assert_full_access_to_container(
|
||||||
|
other_wallet, container, deny_objects.pop(), file_path, self.shell, self.cluster, bearer, xhdr
|
||||||
|
)
|
||||||
|
|
||||||
allow_attribute = self.OTHER_HEADER if match_type == ape.MatchType.EQUAL else self.HEADER
|
allow_attribute = self.OTHER_HEADER if match_type == ape.MatchType.EQUAL else self.HEADER
|
||||||
with reporter.step("Check others can PUT objects without denied attribute"):
|
with reporter.step("Check others can PUT objects without denied attribute"):
|
||||||
put_object_to_random_node(other_wallet, file_path, cid, self.shell, self.cluster, attributes=allow_attribute)
|
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, attributes=allow_attribute)
|
||||||
|
|
||||||
deny_attribute = self.HEADER if match_type == ape.MatchType.EQUAL else self.OTHER_HEADER
|
deny_attribute = self.HEADER if match_type == ape.MatchType.EQUAL else self.OTHER_HEADER
|
||||||
with reporter.step("Check others can not PUT objects with denied attribute"):
|
with reporter.step("Check others can not PUT objects with denied attribute"):
|
||||||
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
|
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
|
||||||
put_object_to_random_node(other_wallet, file_path, cid, self.shell, self.cluster, attributes=deny_attribute)
|
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, attributes=deny_attribute)
|
||||||
|
|
||||||
with reporter.step("Check others can PUT objects with denied attribute and using bearer token"):
|
with reporter.step("Check others can PUT objects with denied attribute and using bearer token"):
|
||||||
put_object_to_random_node(other_wallet, file_path, cid, self.shell, self.cluster, bearer_put, attributes=deny_attribute)
|
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, bearer_put, attributes=deny_attribute)
|
||||||
|
|
||||||
@allure.title("Operations with allow APE rule with resource filters (match_type={match_type}, obj_size={object_size})")
|
@allure.title("Operations with allow APE rule with resource filters (match_type={match_type}, obj_size={object_size})")
|
||||||
@pytest.mark.parametrize("match_type", [ape.MatchType.EQUAL, ape.MatchType.NOT_EQUAL])
|
@pytest.mark.parametrize("match_type", [ape.MatchType.EQUAL, ape.MatchType.NOT_EQUAL])
|
||||||
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
|
@pytest.mark.parametrize("object_size, container_request", [("simple", OWNER_ALLOW_ALL)], indirect=True)
|
||||||
def test_ape_allow_filters_object(
|
def test_ape_allow_filters_object(
|
||||||
self,
|
self,
|
||||||
frostfs_cli: FrostfsCli,
|
frostfs_cli: FrostfsCli,
|
||||||
other_wallet: WalletInfo,
|
other_wallet: WalletInfo,
|
||||||
container_with_objects: tuple[str, list[str], str],
|
container: str,
|
||||||
temp_directory: str,
|
objects_with_attributes: list[str],
|
||||||
|
objects_with_other_attributes: list[str],
|
||||||
|
objects_without_attributes: list[str],
|
||||||
match_type: ape.MatchType,
|
match_type: ape.MatchType,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
file_path: TestFile,
|
||||||
|
temp_directory: str,
|
||||||
):
|
):
|
||||||
# TODO: Refactor this to be fixtures, not test logic!
|
|
||||||
(
|
|
||||||
cid,
|
|
||||||
objects_with_attributes,
|
|
||||||
objects_with_other_attributes,
|
|
||||||
objects_without_attributes,
|
|
||||||
file_path,
|
|
||||||
) = container_with_objects
|
|
||||||
endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
if match_type == ape.MatchType.EQUAL:
|
if match_type == ape.MatchType.EQUAL:
|
||||||
allow_objects = objects_with_attributes
|
allow_objects = objects_with_attributes
|
||||||
deny_objects = objects_with_other_attributes
|
deny_objects = objects_with_other_attributes
|
||||||
allow_attribute = self.HEADER
|
allow_attribute = self.HEADER
|
||||||
deny_attribute = self.OTHER_HEADER
|
deny_attribute = self.OTHER_HEADER
|
||||||
no_attributes_match_context = pytest.raises(Exception, match=OBJECT_NO_ACCESS)
|
no_attributes_match_context = pytest.raises(Exception, match=OBJECT_ACCESS_DENIED)
|
||||||
else:
|
else:
|
||||||
allow_objects = objects_with_other_attributes
|
allow_objects = objects_with_other_attributes
|
||||||
deny_objects = objects_with_attributes
|
deny_objects = objects_with_attributes
|
||||||
allow_attribute = self.OTHER_HEADER
|
allow_attribute = self.OTHER_HEADER
|
||||||
deny_attribute = self.HEADER
|
deny_attribute = self.HEADER
|
||||||
no_attributes_match_context = expect_not_raises()
|
no_attributes_match_context = expect_not_raises()
|
||||||
# End of refactor block
|
|
||||||
|
|
||||||
with reporter.step("Allow operations for others except few operations by resource condition via APE"):
|
with reporter.step("Allow operations for others except few operations by resource condition via APE"):
|
||||||
resource_condition = ape.Condition('"check_key"', '"check_value"', ape.ConditionType.RESOURCE, match_type)
|
resource_condition = ape.Condition('"check_key"', '"check_value"', ape.ConditionType.RESOURCE, match_type)
|
||||||
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
||||||
deny_rule = ape.Rule(ape.Verb.ALLOW, self.RESOURCE_OPERATIONS, [resource_condition, role_condition])
|
deny_rule = ape.Rule(ape.Verb.ALLOW, self.RESOURCE_OPERATIONS, [resource_condition, role_condition])
|
||||||
|
|
||||||
frostfs_cli.ape_manager.add(endpoint, deny_rule.chain_id, target_name=cid, target_type="container", rule=deny_rule.as_string())
|
frostfs_cli.ape_manager.add(
|
||||||
|
rpc_endpoint, deny_rule.chain_id, target_name=container, target_type="container", rule=deny_rule.as_string()
|
||||||
|
)
|
||||||
|
|
||||||
with reporter.step("Wait for one block"):
|
with reporter.step("Wait for one block"):
|
||||||
self.wait_for_blocks()
|
self.wait_for_blocks()
|
||||||
|
@ -323,113 +283,115 @@ class TestApeFilters(ClusterTestBase):
|
||||||
with reporter.step("Check GET, PUT and HEAD operations with objects without attributes for OTHERS role"):
|
with reporter.step("Check GET, PUT and HEAD operations with objects without attributes for OTHERS role"):
|
||||||
oid = objects_without_attributes.pop()
|
oid = objects_without_attributes.pop()
|
||||||
with no_attributes_match_context:
|
with no_attributes_match_context:
|
||||||
assert head_object(other_wallet, cid, oid, self.shell, endpoint)
|
assert head_object(other_wallet, container, oid, self.shell, rpc_endpoint)
|
||||||
|
|
||||||
with no_attributes_match_context:
|
with no_attributes_match_context:
|
||||||
assert get_object_from_random_node(other_wallet, cid, oid, self.shell, self.cluster)
|
assert get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster)
|
||||||
|
|
||||||
with no_attributes_match_context:
|
with no_attributes_match_context:
|
||||||
assert put_object_to_random_node(other_wallet, file_path, cid, self.shell, self.cluster)
|
assert put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster)
|
||||||
|
|
||||||
with reporter.step("Create bearer token with everything allowed for others role"):
|
with reporter.step("Create bearer token with everything allowed for others role"):
|
||||||
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
||||||
rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, role_condition)
|
rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, role_condition)
|
||||||
bearer = create_bearer_token(frostfs_cli, temp_directory, cid, rule, endpoint)
|
bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, rpc_endpoint)
|
||||||
|
|
||||||
with reporter.step("Check others can get and put objects without attributes and using bearer token"):
|
with reporter.step("Check others can get and put objects without attributes and using bearer token"):
|
||||||
oid = objects_without_attributes[0]
|
oid = objects_without_attributes[0]
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
head_object(other_wallet, cid, oid, self.shell, endpoint, bearer)
|
head_object(other_wallet, container, oid, self.shell, rpc_endpoint, bearer)
|
||||||
|
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
get_object_from_random_node(other_wallet, cid, oid, self.shell, self.cluster, bearer)
|
get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster, bearer)
|
||||||
|
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
put_object_to_random_node(other_wallet, file_path, cid, self.shell, self.cluster, bearer)
|
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, bearer)
|
||||||
|
|
||||||
with reporter.step("Check others can get and put objects with attributes matching the filter"):
|
with reporter.step("Check others can get and put objects with attributes matching the filter"):
|
||||||
oid = allow_objects.pop()
|
oid = allow_objects.pop()
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
head_object(other_wallet, cid, oid, self.shell, endpoint)
|
head_object(other_wallet, container, oid, self.shell, rpc_endpoint)
|
||||||
|
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
get_object_from_random_node(other_wallet, cid, oid, self.shell, self.cluster)
|
get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster)
|
||||||
|
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
put_object_to_random_node(other_wallet, file_path, cid, self.shell, self.cluster, attributes=allow_attribute)
|
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, attributes=allow_attribute)
|
||||||
|
|
||||||
with reporter.step("Check others cannot get and put objects without attributes matching the filter"):
|
with reporter.step("Check others cannot get and put objects without attributes matching the filter"):
|
||||||
oid = deny_objects[0]
|
oid = deny_objects[0]
|
||||||
with pytest.raises(Exception, match=OBJECT_NO_ACCESS):
|
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
|
||||||
head_object(other_wallet, cid, oid, self.shell, endpoint)
|
head_object(other_wallet, container, oid, self.shell, rpc_endpoint)
|
||||||
|
|
||||||
with pytest.raises(Exception, match=OBJECT_NO_ACCESS):
|
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
|
||||||
assert get_object_from_random_node(other_wallet, cid, oid, self.shell, self.cluster)
|
assert get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster)
|
||||||
|
|
||||||
with pytest.raises(Exception, match=OBJECT_NO_ACCESS):
|
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
|
||||||
assert put_object_to_random_node(other_wallet, file_path, cid, self.shell, self.cluster, attributes=deny_attribute)
|
assert put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, attributes=deny_attribute)
|
||||||
|
|
||||||
with reporter.step("Check others can get and put objects without attributes matching the filter with bearer token"):
|
with reporter.step("Check others can get and put objects without attributes matching the filter with bearer token"):
|
||||||
oid = deny_objects.pop()
|
oid = deny_objects.pop()
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
head_object(other_wallet, cid, oid, self.shell, endpoint, bearer)
|
head_object(other_wallet, container, oid, self.shell, rpc_endpoint, bearer)
|
||||||
|
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
get_object_from_random_node(other_wallet, cid, oid, self.shell, self.cluster, bearer)
|
get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster, bearer)
|
||||||
|
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
put_object_to_random_node(other_wallet, file_path, cid, self.shell, self.cluster, bearer, attributes=allow_attribute)
|
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, bearer, attributes=allow_attribute)
|
||||||
|
|
||||||
@allure.title("PUT and GET object using bearer with objectID in filter (obj_size={object_size}, match_type=NOT_EQUAL)")
|
@allure.title("PUT and GET object using bearer with objectID in filter (obj_size={object_size}, match_type=NOT_EQUAL)")
|
||||||
|
@pytest.mark.parametrize("container_request", [OWNER_ALLOW_ALL], indirect=True)
|
||||||
def test_ape_filter_object_id_not_equals(
|
def test_ape_filter_object_id_not_equals(
|
||||||
self,
|
self,
|
||||||
frostfs_cli: FrostfsCli,
|
frostfs_cli: FrostfsCli,
|
||||||
default_wallet: WalletInfo,
|
default_wallet: WalletInfo,
|
||||||
other_wallet: WalletInfo,
|
other_wallet: WalletInfo,
|
||||||
private_container: str,
|
container: str,
|
||||||
temp_directory: str,
|
temp_directory: str,
|
||||||
file_path: TestFile,
|
file_path: TestFile,
|
||||||
):
|
):
|
||||||
with reporter.step("Put object to container"):
|
with reporter.step("Put object to container"):
|
||||||
oid = put_object_to_random_node(default_wallet, file_path, private_container, self.shell, self.cluster)
|
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
|
||||||
|
|
||||||
with reporter.step("Create bearer token with objectID filter"):
|
with reporter.step("Create bearer token with objectID filter"):
|
||||||
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
||||||
object_condition = ape.Condition.by_object_id(oid, ape.ConditionType.RESOURCE, ape.MatchType.NOT_EQUAL)
|
object_condition = ape.Condition.by_object_id(oid, ape.ConditionType.RESOURCE, ape.MatchType.NOT_EQUAL)
|
||||||
rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, [role_condition, object_condition])
|
rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, [role_condition, object_condition])
|
||||||
bearer = create_bearer_token(frostfs_cli, temp_directory, private_container, rule, self.cluster.default_rpc_endpoint)
|
bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, self.cluster.default_rpc_endpoint)
|
||||||
|
|
||||||
with reporter.step("Others should be able to put object using bearer token"):
|
with reporter.step("Others should be able to put object using bearer token"):
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
put_object_to_random_node(other_wallet, file_path, private_container, self.shell, self.cluster, bearer)
|
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, bearer)
|
||||||
|
|
||||||
with reporter.step("Others should not be able to get object matching the filter"):
|
with reporter.step("Others should not be able to get object matching the filter"):
|
||||||
with pytest.raises(Exception, match=OBJECT_NO_ACCESS):
|
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
|
||||||
get_object_from_random_node(other_wallet, private_container, oid, self.shell, self.cluster, bearer)
|
get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster, bearer)
|
||||||
|
|
||||||
@allure.title("PUT and GET object using bearer with objectID in filter (obj_size={object_size}, match_type=EQUAL)")
|
@allure.title("PUT and GET object using bearer with objectID in filter (obj_size={object_size}, match_type=EQUAL)")
|
||||||
|
@pytest.mark.parametrize("container_request", [OWNER_ALLOW_ALL], indirect=True)
|
||||||
def test_ape_filter_object_id_equals(
|
def test_ape_filter_object_id_equals(
|
||||||
self,
|
self,
|
||||||
frostfs_cli: FrostfsCli,
|
frostfs_cli: FrostfsCli,
|
||||||
default_wallet: WalletInfo,
|
default_wallet: WalletInfo,
|
||||||
other_wallet: WalletInfo,
|
other_wallet: WalletInfo,
|
||||||
private_container: str,
|
container: str,
|
||||||
temp_directory: str,
|
temp_directory: str,
|
||||||
file_path: TestFile,
|
file_path: TestFile,
|
||||||
):
|
):
|
||||||
with reporter.step("Put object to container"):
|
with reporter.step("Put object to container"):
|
||||||
oid = put_object_to_random_node(default_wallet, file_path, private_container, self.shell, self.cluster)
|
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
|
||||||
|
|
||||||
with reporter.step("Create bearer token with objectID filter"):
|
with reporter.step("Create bearer token with objectID filter"):
|
||||||
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
||||||
object_condition = ape.Condition.by_object_id(oid, ape.ConditionType.RESOURCE, ape.MatchType.EQUAL)
|
object_condition = ape.Condition.by_object_id(oid, ape.ConditionType.RESOURCE, ape.MatchType.EQUAL)
|
||||||
rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, [role_condition, object_condition])
|
rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, [role_condition, object_condition])
|
||||||
bearer = create_bearer_token(frostfs_cli, temp_directory, private_container, rule, self.cluster.default_rpc_endpoint)
|
bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, self.cluster.default_rpc_endpoint)
|
||||||
|
|
||||||
with reporter.step("Others should not be able to put object using bearer token"):
|
with reporter.step("Others should not be able to put object using bearer token"):
|
||||||
with pytest.raises(Exception, match=OBJECT_NO_ACCESS):
|
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
|
||||||
put_object_to_random_node(other_wallet, file_path, private_container, self.shell, self.cluster, bearer)
|
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, bearer)
|
||||||
|
|
||||||
with reporter.step("Others should be able to get object matching the filter"):
|
with reporter.step("Others should be able to get object matching the filter"):
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
get_object_from_random_node(other_wallet, private_container, oid, self.shell, self.cluster, bearer)
|
get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster, bearer)
|
||||||
|
|
|
@ -5,9 +5,10 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
|
||||||
from frostfs_testlib.storage.dataclasses import ape
|
from frostfs_testlib.storage.dataclasses import ape
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
|
from frostfs_testlib.utils.file_utils import TestFile
|
||||||
|
|
||||||
from pytest_tests.helpers.bearer_token import create_bearer_token
|
from ....helpers.bearer_token import create_bearer_token
|
||||||
from pytest_tests.helpers.container_access import (
|
from ....helpers.container_access import (
|
||||||
ALL_OBJECT_OPERATIONS,
|
ALL_OBJECT_OPERATIONS,
|
||||||
assert_access_to_container,
|
assert_access_to_container,
|
||||||
assert_full_access_to_container,
|
assert_full_access_to_container,
|
||||||
|
@ -15,6 +16,7 @@ from pytest_tests.helpers.container_access import (
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.sanity
|
@pytest.mark.sanity
|
||||||
@pytest.mark.bearer
|
@pytest.mark.bearer
|
||||||
@pytest.mark.ape
|
@pytest.mark.ape
|
||||||
|
@ -23,21 +25,21 @@ class TestApeBearer(ClusterTestBase):
|
||||||
@pytest.mark.parametrize("role", [ape.Role.OWNER, ape.Role.OTHERS], indirect=True)
|
@pytest.mark.parametrize("role", [ape.Role.OWNER, ape.Role.OTHERS], indirect=True)
|
||||||
def test_bearer_token_operations(
|
def test_bearer_token_operations(
|
||||||
self,
|
self,
|
||||||
container_with_objects: tuple[str, list[str], str],
|
container: str,
|
||||||
|
objects: list[str],
|
||||||
frostfs_cli: FrostfsCli,
|
frostfs_cli: FrostfsCli,
|
||||||
temp_directory: str,
|
temp_directory: str,
|
||||||
test_wallet: WalletInfo,
|
test_wallet: WalletInfo,
|
||||||
role: ape.Role,
|
role: ape.Role,
|
||||||
|
file_path: TestFile,
|
||||||
|
rpc_endpoint: str,
|
||||||
):
|
):
|
||||||
cid, objects_oids, file_path = container_with_objects
|
|
||||||
endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
with reporter.step(f"Check {role} has full access to container without bearer token"):
|
with reporter.step(f"Check {role} has full access to container without bearer token"):
|
||||||
assert_full_access_to_container(test_wallet, cid, objects_oids.pop(), file_path, self.shell, self.cluster)
|
assert_full_access_to_container(test_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
|
||||||
|
|
||||||
with reporter.step(f"Deny all operations for everyone via APE"):
|
with reporter.step(f"Deny all operations for everyone via APE"):
|
||||||
rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS)
|
rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS)
|
||||||
frostfs_cli.ape_manager.add(endpoint, rule.chain_id, target_name=cid, target_type="container", rule=rule.as_string())
|
frostfs_cli.ape_manager.add(rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string())
|
||||||
|
|
||||||
with reporter.step("Wait for one block"):
|
with reporter.step("Wait for one block"):
|
||||||
self.wait_for_blocks()
|
self.wait_for_blocks()
|
||||||
|
@ -46,25 +48,25 @@ class TestApeBearer(ClusterTestBase):
|
||||||
bearer = create_bearer_token(
|
bearer = create_bearer_token(
|
||||||
frostfs_cli,
|
frostfs_cli,
|
||||||
temp_directory,
|
temp_directory,
|
||||||
cid,
|
container,
|
||||||
rule=ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS),
|
rule=ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS),
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
endpoint=rpc_endpoint,
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step(f"Check {role} without token has no access to all operations with container"):
|
with reporter.step(f"Check {role} without token has no access to all operations with container"):
|
||||||
assert_no_access_to_container(test_wallet, cid, objects_oids.pop(), file_path, self.shell, self.cluster)
|
assert_no_access_to_container(test_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
|
||||||
|
|
||||||
with reporter.step(f"Check {role} with token has access to all operations with container"):
|
with reporter.step(f"Check {role} with token has access to all operations with container"):
|
||||||
assert_full_access_to_container(test_wallet, cid, objects_oids.pop(), file_path, self.shell, self.cluster, bearer)
|
assert_full_access_to_container(test_wallet, container, objects.pop(), file_path, self.shell, self.cluster, bearer)
|
||||||
|
|
||||||
with reporter.step(f"Remove deny rule from APE"):
|
with reporter.step(f"Remove deny rule from APE"):
|
||||||
frostfs_cli.ape_manager.remove(endpoint, rule.chain_id, target_name=cid, target_type="container")
|
frostfs_cli.ape_manager.remove(rpc_endpoint, rule.chain_id, target_name=container, target_type="container")
|
||||||
|
|
||||||
with reporter.step("Wait for one block"):
|
with reporter.step("Wait for one block"):
|
||||||
self.wait_for_blocks()
|
self.wait_for_blocks()
|
||||||
|
|
||||||
with reporter.step(f"Check {role} without token has access to all operations with container"):
|
with reporter.step(f"Check {role} without token has access to all operations with container"):
|
||||||
assert_full_access_to_container(test_wallet, cid, objects_oids.pop(), file_path, self.shell, self.cluster)
|
assert_full_access_to_container(test_wallet, container, objects.pop(), file_path, self.shell, self.cluster)
|
||||||
|
|
||||||
@allure.title("BearerToken for compound operations (obj_size={object_size})")
|
@allure.title("BearerToken for compound operations (obj_size={object_size})")
|
||||||
def test_bearer_token_compound_operations(
|
def test_bearer_token_compound_operations(
|
||||||
|
@ -73,16 +75,16 @@ class TestApeBearer(ClusterTestBase):
|
||||||
temp_directory: str,
|
temp_directory: str,
|
||||||
default_wallet: WalletInfo,
|
default_wallet: WalletInfo,
|
||||||
other_wallet: WalletInfo,
|
other_wallet: WalletInfo,
|
||||||
container_with_objects: tuple[str, list[str], str],
|
container: str,
|
||||||
|
objects: list[str],
|
||||||
|
rpc_endpoint: str,
|
||||||
|
file_path: TestFile,
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
Bearer Token COMPLETLY overrides chains set for the specific target.
|
Bearer Token COMPLETLY overrides chains set for the specific target.
|
||||||
Thus, any restictions or permissions should be explicitly defined in BT.
|
Thus, any restictions or permissions should be explicitly defined in BT.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
cid, objects_oids, file_path = container_with_objects
|
|
||||||
|
|
||||||
wallets_map = {
|
wallets_map = {
|
||||||
ape.Role.OWNER: default_wallet,
|
ape.Role.OWNER: default_wallet,
|
||||||
ape.Role.OTHERS: other_wallet,
|
ape.Role.OTHERS: other_wallet,
|
||||||
|
@ -111,25 +113,24 @@ class TestApeBearer(ClusterTestBase):
|
||||||
|
|
||||||
bt_access_map = {
|
bt_access_map = {
|
||||||
ape.Role.OWNER: {
|
ape.Role.OWNER: {
|
||||||
ape.ObjectOperations.PUT: True,
|
|
||||||
ape.ObjectOperations.GET: True,
|
|
||||||
ape.ObjectOperations.HEAD: True,
|
|
||||||
ape.ObjectOperations.GET_RANGE: True,
|
|
||||||
ape.ObjectOperations.GET_RANGE_HASH: True,
|
|
||||||
ape.ObjectOperations.SEARCH: True,
|
|
||||||
ape.ObjectOperations.DELETE: True,
|
|
||||||
},
|
|
||||||
ape.Role.OTHERS: {
|
|
||||||
ape.ObjectOperations.PUT: True,
|
ape.ObjectOperations.PUT: True,
|
||||||
ape.ObjectOperations.GET: False,
|
ape.ObjectOperations.GET: False,
|
||||||
ape.ObjectOperations.HEAD: True,
|
ape.ObjectOperations.HEAD: True,
|
||||||
|
ape.ObjectOperations.GET_RANGE: True,
|
||||||
|
ape.ObjectOperations.GET_RANGE_HASH: False,
|
||||||
|
ape.ObjectOperations.SEARCH: False,
|
||||||
|
ape.ObjectOperations.DELETE: True,
|
||||||
|
},
|
||||||
|
# Bearer Token COMPLETLY overrides chains set for the specific target.
|
||||||
|
# Thus, any restictions or permissions should be explicitly defined in BT.
|
||||||
|
ape.Role.OTHERS: {
|
||||||
|
ape.ObjectOperations.PUT: False,
|
||||||
|
ape.ObjectOperations.GET: False,
|
||||||
|
ape.ObjectOperations.HEAD: False,
|
||||||
ape.ObjectOperations.GET_RANGE: False,
|
ape.ObjectOperations.GET_RANGE: False,
|
||||||
ape.ObjectOperations.GET_RANGE_HASH: False,
|
ape.ObjectOperations.GET_RANGE_HASH: False,
|
||||||
# Although SEARCH is denied by the APE chain defined in Policy contract,
|
ape.ObjectOperations.SEARCH: False,
|
||||||
# Bearer Token COMPLETLY overrides chains set for the specific target.
|
ape.ObjectOperations.DELETE: False,
|
||||||
# Thus, any restictions or permissions should be explicitly defined in BT.
|
|
||||||
ape.ObjectOperations.SEARCH: True,
|
|
||||||
ape.ObjectOperations.DELETE: True,
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -146,9 +147,11 @@ class TestApeBearer(ClusterTestBase):
|
||||||
# Operations that we will allow for each role with bearer token
|
# Operations that we will allow for each role with bearer token
|
||||||
bearer_map = {
|
bearer_map = {
|
||||||
ape.Role.OWNER: [
|
ape.Role.OWNER: [
|
||||||
ape.ObjectOperations.DELETE,
|
|
||||||
ape.ObjectOperations.PUT,
|
ape.ObjectOperations.PUT,
|
||||||
|
ape.ObjectOperations.HEAD,
|
||||||
ape.ObjectOperations.GET_RANGE,
|
ape.ObjectOperations.GET_RANGE,
|
||||||
|
# Delete also requires PUT (to make tobstone) and HEAD (to get simple objects header)
|
||||||
|
ape.ObjectOperations.DELETE,
|
||||||
],
|
],
|
||||||
ape.Role.OTHERS: [
|
ape.Role.OTHERS: [
|
||||||
ape.ObjectOperations.GET,
|
ape.ObjectOperations.GET,
|
||||||
|
@ -167,24 +170,26 @@ class TestApeBearer(ClusterTestBase):
|
||||||
for role, operations in deny_map.items():
|
for role, operations in deny_map.items():
|
||||||
with reporter.step(f"Add APE deny rule for {role}"):
|
with reporter.step(f"Add APE deny rule for {role}"):
|
||||||
rule = ape.Rule(ape.Verb.DENY, operations, conditions_map[role])
|
rule = ape.Rule(ape.Verb.DENY, operations, conditions_map[role])
|
||||||
frostfs_cli.ape_manager.add(endpoint, rule.chain_id, target_name=cid, target_type="container", rule=rule.as_string())
|
frostfs_cli.ape_manager.add(
|
||||||
|
rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string()
|
||||||
|
)
|
||||||
|
|
||||||
with reporter.step("Wait for one block"):
|
with reporter.step("Wait for one block"):
|
||||||
self.wait_for_blocks()
|
self.wait_for_blocks()
|
||||||
|
|
||||||
for role, wallet in wallets_map.items():
|
for role, wallet in wallets_map.items():
|
||||||
with reporter.step(f"Assert access to container without bearer token for {role}"):
|
with reporter.step(f"Assert access to container without bearer token for {role}"):
|
||||||
assert_access_to_container(access_map[role], wallet, cid, objects_oids.pop(), file_path, self.shell, self.cluster)
|
assert_access_to_container(access_map[role], wallet, container, objects.pop(), file_path, self.shell, self.cluster)
|
||||||
|
|
||||||
bearer_tokens = {}
|
bearer_tokens = {}
|
||||||
for role in wallets_map.keys():
|
for role in wallets_map.keys():
|
||||||
with reporter.step(f"Create bearer token for {role}"):
|
with reporter.step(f"Create bearer token for {role}"):
|
||||||
rule = ape.Rule(verb_map[role], bearer_map[role], conditions_map[role])
|
rule = ape.Rule(verb_map[role], bearer_map[role], conditions_map[role])
|
||||||
bt = create_bearer_token(frostfs_cli, temp_directory, cid, rule, endpoint)
|
bt = create_bearer_token(frostfs_cli, temp_directory, container, rule, rpc_endpoint)
|
||||||
bearer_tokens[role] = bt
|
bearer_tokens[role] = bt
|
||||||
|
|
||||||
for role, wallet in wallets_map.items():
|
for role, wallet in wallets_map.items():
|
||||||
with reporter.step(f"Assert access to container with bearer token for {role}"):
|
with reporter.step(f"Assert access to container with bearer token for {role}"):
|
||||||
assert_access_to_container(
|
assert_access_to_container(
|
||||||
bt_access_map[role], wallet, cid, objects_oids.pop(), file_path, self.shell, self.cluster, bearer_tokens[role]
|
bt_access_map[role], wallet, container, objects.pop(), file_path, self.shell, self.cluster, bearer_tokens[role]
|
||||||
)
|
)
|
||||||
|
|
|
@ -1,10 +1,11 @@
|
||||||
|
import json
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
|
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.steps.cli.container import create_container
|
from frostfs_testlib.steps.cli.container import search_nodes_with_container
|
||||||
from frostfs_testlib.steps.cli.object import put_object_to_random_node
|
from frostfs_testlib.steps.cli.object import put_object_to_random_node
|
||||||
from frostfs_testlib.storage.cluster import Cluster
|
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
||||||
from frostfs_testlib.storage.dataclasses import ape
|
from frostfs_testlib.storage.dataclasses import ape
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.parallel import parallel
|
from frostfs_testlib.testing.parallel import parallel
|
||||||
|
@ -39,26 +40,33 @@ def test_wallet(default_wallet: WalletInfo, other_wallet: WalletInfo, role: ape.
|
||||||
return role_to_wallet_map[role]
|
return role_to_wallet_map[role]
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="function")
|
@pytest.fixture
|
||||||
def container_with_objects(default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, file_path: str) -> tuple[str, list[str], str]:
|
def objects(container: str, default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, file_path: str):
|
||||||
|
|
||||||
with reporter.step("Create public container"):
|
|
||||||
cid = create_container(
|
|
||||||
default_wallet,
|
|
||||||
shell=client_shell,
|
|
||||||
endpoint=cluster.default_rpc_endpoint,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step("Add test objects to container"):
|
with reporter.step("Add test objects to container"):
|
||||||
put_results = parallel(
|
put_results = parallel(
|
||||||
[put_object_to_random_node] * OBJECT_COUNT,
|
[put_object_to_random_node] * OBJECT_COUNT,
|
||||||
wallet=default_wallet,
|
wallet=default_wallet,
|
||||||
path=file_path,
|
path=file_path,
|
||||||
cid=cid,
|
cid=container,
|
||||||
shell=client_shell,
|
shell=client_shell,
|
||||||
cluster=cluster,
|
cluster=cluster,
|
||||||
)
|
)
|
||||||
objects_oids = [put_result.result() for put_result in put_results]
|
objects_oids = [put_result.result() for put_result in put_results]
|
||||||
|
|
||||||
return cid, objects_oids, file_path
|
return objects_oids
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def container_nodes(default_wallet: WalletInfo, container: str, client_shell: Shell, cluster: Cluster) -> list[ClusterNode]:
|
||||||
|
cid = container
|
||||||
|
container_holder_nodes = search_nodes_with_container(default_wallet, cid, client_shell, cluster.default_rpc_endpoint, cluster)
|
||||||
|
|
||||||
|
report_data = {node.id: node.host_ip for node in container_holder_nodes}
|
||||||
|
reporter.attach(json.dumps(report_data, indent=2), "container_nodes.json")
|
||||||
|
|
||||||
|
return container_holder_nodes
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def container_node_wallet(container_nodes: list[ClusterNode]) -> WalletInfo:
|
||||||
|
return WalletInfo.from_node(container_nodes[0].storage_node)
|
||||||
|
|
0
pytest_tests/testsuites/ape/__init__.py
Normal file
0
pytest_tests/testsuites/ape/__init__.py
Normal file
52
pytest_tests/testsuites/ape/conftest.py
Normal file
52
pytest_tests/testsuites/ape/conftest.py
Normal file
|
@ -0,0 +1,52 @@
|
||||||
|
import pytest
|
||||||
|
from frostfs_testlib import reporter
|
||||||
|
from frostfs_testlib.cli import FrostfsCli
|
||||||
|
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
|
||||||
|
from frostfs_testlib.shell.interfaces import Shell
|
||||||
|
from frostfs_testlib.steps.cli.object import put_object
|
||||||
|
from frostfs_testlib.storage.cluster import Cluster
|
||||||
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="session")
|
||||||
|
def frostfs_cli_on_first_node(cluster: Cluster) -> FrostfsCli:
|
||||||
|
node = cluster.cluster_nodes[0]
|
||||||
|
shell = node.host.get_shell()
|
||||||
|
|
||||||
|
return FrostfsCli(shell, FROSTFS_CLI_EXEC, node.storage_node.get_remote_wallet_config_path())
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def object_id(
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
cluster: Cluster,
|
||||||
|
client_shell: Shell,
|
||||||
|
container: str,
|
||||||
|
) -> str:
|
||||||
|
test_file = generate_file(simple_object_size.value)
|
||||||
|
|
||||||
|
with reporter.step("Allow PutObject on first node via local override"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="allowPutObject",
|
||||||
|
rule=f"allow Object.Put *",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Put objects in container on the first node"):
|
||||||
|
object_id = put_object(default_wallet, test_file, container, client_shell, cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Remove PutObject local override from first node"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="allowPutObject",
|
||||||
|
)
|
||||||
|
|
||||||
|
return object_id
|
212
pytest_tests/testsuites/ape/test_ape_local_container.py
Normal file
212
pytest_tests/testsuites/ape/test_ape_local_container.py
Normal file
|
@ -0,0 +1,212 @@
|
||||||
|
from time import sleep
|
||||||
|
|
||||||
|
import allure
|
||||||
|
import pytest
|
||||||
|
from frostfs_testlib import reporter
|
||||||
|
from frostfs_testlib.cli import FrostfsCli
|
||||||
|
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
|
||||||
|
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
|
||||||
|
from frostfs_testlib.resources.error_patterns import RULE_ACCESS_DENIED_CONTAINER
|
||||||
|
from frostfs_testlib.shell.interfaces import Shell
|
||||||
|
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
||||||
|
from frostfs_testlib.storage.dataclasses.ape import Operations
|
||||||
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
|
from frostfs_testlib.testing.parallel import parallel
|
||||||
|
from frostfs_testlib.testing.test_control import expect_not_raises
|
||||||
|
from frostfs_testlib.utils import datetime_utils
|
||||||
|
|
||||||
|
from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest, MultipleContainersRequest
|
||||||
|
|
||||||
|
REP2 = ContainerRequest("REP 2", ape_rules=APE_EVERYONE_ALLOW_ALL, short_name="REP2_allow_all_ape")
|
||||||
|
|
||||||
|
|
||||||
|
def remove_local_overrides_on_node(node: ClusterNode):
|
||||||
|
target = "Chain ID"
|
||||||
|
shell: Shell = node.host.get_shell()
|
||||||
|
remote_config: str = node.storage_node.get_remote_wallet_config_path()
|
||||||
|
cli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=remote_config)
|
||||||
|
with reporter.step(f"Check local overrides on {node.storage_node.id} node"):
|
||||||
|
rules = cli.control.list_rules(
|
||||||
|
endpoint=node.storage_node.get_control_endpoint(), target_name="root", target_type="namespace"
|
||||||
|
).stdout
|
||||||
|
if target in rules:
|
||||||
|
with reporter.step("Delete rules"):
|
||||||
|
chain_ids = [i.split(" ")[2].strip() for i in rules.split("\n") if "Chain ID" in i]
|
||||||
|
for chain_id in chain_ids:
|
||||||
|
cli.control.remove_rule(
|
||||||
|
endpoint=node.storage_node.get_control_endpoint(),
|
||||||
|
target_type="namespace",
|
||||||
|
target_name="root",
|
||||||
|
chain_id=chain_id,
|
||||||
|
)
|
||||||
|
with reporter.step("Wait for one block"):
|
||||||
|
sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="session")
|
||||||
|
def remove_local_ape_overrides(cluster: Cluster) -> None:
|
||||||
|
yield
|
||||||
|
with reporter.step("Check local overrides on nodes."):
|
||||||
|
parallel(remove_local_overrides_on_node, cluster.cluster_nodes)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.ape
|
||||||
|
@pytest.mark.ape_local
|
||||||
|
@pytest.mark.ape_container
|
||||||
|
@pytest.mark.ape_namespace
|
||||||
|
class TestApeLocalOverrideContainer(ClusterTestBase):
|
||||||
|
@allure.title("LocalOverride: Deny to GetContainer in root tenant")
|
||||||
|
def test_local_override_deny_to_get_container_root(
|
||||||
|
self,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
frostfs_cli: FrostfsCli,
|
||||||
|
container: str,
|
||||||
|
remove_local_ape_overrides: None,
|
||||||
|
):
|
||||||
|
with reporter.step("Create a namespace rule for the first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="namespace",
|
||||||
|
target_name="root",
|
||||||
|
chain_id="denyContainerGet",
|
||||||
|
rule="deny Container.Get *",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check get the container property on the first node, expected denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_CONTAINER.format(operation=Operations.GET_CONTAINER)):
|
||||||
|
frostfs_cli.container.get(self.cluster.storage_nodes[0].get_rpc_endpoint(), container)
|
||||||
|
|
||||||
|
with reporter.step("Check get the container property on the second node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
frostfs_cli.container.get(self.cluster.storage_nodes[1].get_rpc_endpoint(), container)
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="namespace",
|
||||||
|
target_name="root",
|
||||||
|
chain_id="denyContainerGet",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check get the container property on the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
frostfs_cli.container.get(self.cluster.storage_nodes[0].get_rpc_endpoint(), container)
|
||||||
|
|
||||||
|
@allure.title("LocalOverride: Deny to PutContainer in root tenant")
|
||||||
|
def test_local_override_deny_to_put_container_root(
|
||||||
|
self,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
frostfs_cli: FrostfsCli,
|
||||||
|
remove_local_ape_overrides: None,
|
||||||
|
):
|
||||||
|
with reporter.step("Create a namespace rule for the first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="namespace",
|
||||||
|
target_name="root",
|
||||||
|
chain_id="denyContainerPut",
|
||||||
|
rule="deny Container.Put *",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check create container on the first node, expected access denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_CONTAINER.format(operation=Operations.PUT_CONTAINER)):
|
||||||
|
frostfs_cli.container.create(
|
||||||
|
rpc_endpoint=self.cluster.storage_nodes[0].get_rpc_endpoint(),
|
||||||
|
policy="REP 1",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check create a container on the second node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
frostfs_cli.container.create(
|
||||||
|
rpc_endpoint=self.cluster.storage_nodes[1].get_rpc_endpoint(),
|
||||||
|
policy="REP 1",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="namespace",
|
||||||
|
target_name="root",
|
||||||
|
chain_id="denyContainerPut",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check create a container on the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
frostfs_cli.container.create(
|
||||||
|
rpc_endpoint=self.cluster.storage_nodes[0].get_rpc_endpoint(),
|
||||||
|
policy="REP 1",
|
||||||
|
)
|
||||||
|
|
||||||
|
@allure.title("LocalOverride: Deny to ListContainer in root tenant")
|
||||||
|
def test_local_override_deny_to_list_container_root(
|
||||||
|
self,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
frostfs_cli: FrostfsCli,
|
||||||
|
remove_local_ape_overrides: None,
|
||||||
|
):
|
||||||
|
with reporter.step("Create a namespace rule for the first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="namespace",
|
||||||
|
target_name="root",
|
||||||
|
chain_id="denyContainerList",
|
||||||
|
rule="deny Container.List *",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check list the container properties on the first node, expected access denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_CONTAINER.format(operation=Operations.LIST_CONTAINER)):
|
||||||
|
frostfs_cli.container.list(rpc_endpoint=self.cluster.storage_nodes[0].get_rpc_endpoint(), ttl=1)
|
||||||
|
|
||||||
|
with reporter.step("Check list the container properties on the second node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
frostfs_cli.container.list(rpc_endpoint=self.cluster.storage_nodes[1].get_rpc_endpoint(), ttl=1)
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="namespace",
|
||||||
|
target_name="root",
|
||||||
|
chain_id="denyContainerList",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check display a list of containers on the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
frostfs_cli.container.list(rpc_endpoint=self.cluster.storage_nodes[0].get_rpc_endpoint(), ttl=1)
|
||||||
|
|
||||||
|
@allure.title("LocalOverride: Deny to DeleteContainer in root tenant")
|
||||||
|
@pytest.mark.parametrize("multiple_containers_request", [MultipleContainersRequest([REP2, REP2])], indirect=True)
|
||||||
|
def test_local_override_deny_to_delete_container_root(
|
||||||
|
self,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
frostfs_cli: FrostfsCli,
|
||||||
|
remove_local_ape_overrides: None,
|
||||||
|
containers: list[str],
|
||||||
|
):
|
||||||
|
with reporter.step("Create a namespace rule for the first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="namespace",
|
||||||
|
target_name="root",
|
||||||
|
chain_id="denyContainerDelete",
|
||||||
|
rule="deny Container.Delete *",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check delete first container from the first node, expected access denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_CONTAINER.format(operation=Operations.DELETE_CONTAINER)):
|
||||||
|
frostfs_cli.container.delete(self.cluster.storage_nodes[0].get_rpc_endpoint(), containers[0], ttl=1)
|
||||||
|
|
||||||
|
with reporter.step("Check delete a second container from the second node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
frostfs_cli.container.delete(self.cluster.storage_nodes[1].get_rpc_endpoint(), containers[1], ttl=1)
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="namespace",
|
||||||
|
target_name="root",
|
||||||
|
chain_id="denyContainerDelete",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check delete first container from the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
frostfs_cli.container.delete(self.cluster.storage_nodes[0].get_rpc_endpoint(), containers[0], ttl=1)
|
254
pytest_tests/testsuites/ape/test_ape_local_object_allow.py
Normal file
254
pytest_tests/testsuites/ape/test_ape_local_object_allow.py
Normal file
|
@ -0,0 +1,254 @@
|
||||||
|
import allure
|
||||||
|
import pytest
|
||||||
|
from frostfs_testlib import reporter
|
||||||
|
from frostfs_testlib.cli import FrostfsCli
|
||||||
|
from frostfs_testlib.resources.error_patterns import NO_RULE_FOUND_OBJECT
|
||||||
|
from frostfs_testlib.steps.cli.object import delete_object, get_object, get_range, get_range_hash, head_object, put_object, search_object
|
||||||
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
|
from frostfs_testlib.testing.test_control import expect_not_raises
|
||||||
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
|
from ...helpers.container_request import ContainerRequest
|
||||||
|
|
||||||
|
REP1_MSK = ContainerRequest("REP 1 IN MOW CBF 1 SELECT 1 FROM MSK AS MOW FILTER SubDivCode EQ MOW AS MSK", short_name="REP1_MSK_no_ape")
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.ape
|
||||||
|
@pytest.mark.ape_local
|
||||||
|
@pytest.mark.ape_object
|
||||||
|
@pytest.mark.ape_allow
|
||||||
|
@pytest.mark.parametrize("container_request", [REP1_MSK], indirect=True)
|
||||||
|
class TestApeLocalOverrideAllow(ClusterTestBase):
|
||||||
|
@allure.title("LocalOverride: Allow to GetObject in root tenant")
|
||||||
|
def test_local_override_allow_to_get_object_root(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
container: str,
|
||||||
|
object_id: str,
|
||||||
|
):
|
||||||
|
with reporter.step("Create local override on first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="allowGetObject",
|
||||||
|
rule=f"allow Object.Get *",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check get object in container on the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
get_object(default_wallet, container, object_id, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check get object in container on the second node, epxected access denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
|
||||||
|
get_object(default_wallet, container, object_id, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="allowGetObject",
|
||||||
|
)
|
||||||
|
|
||||||
|
@allure.title("LocalOverride: Allow to PutObject in root tenant")
|
||||||
|
def test_local_override_allow_to_put_object_root(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
):
|
||||||
|
test_file = generate_file(simple_object_size.value)
|
||||||
|
|
||||||
|
with reporter.step("Create local override on first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="allowPutObject",
|
||||||
|
rule=f"allow Object.Put *",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check put object in container on the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check get object in container on the second node, epxected access denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
|
||||||
|
put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="allowPutObject",
|
||||||
|
)
|
||||||
|
|
||||||
|
@allure.title("LocalOverride: Allow to HeadObject in root tenant")
|
||||||
|
def test_local_override_allow_to_head_object_root(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
container: str,
|
||||||
|
object_id: str,
|
||||||
|
):
|
||||||
|
with reporter.step("Create local override on first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="allowHeadObject",
|
||||||
|
rule=f"allow Object.Head *",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check head object in container on the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
head_object(default_wallet, container, object_id, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check head object in container on the second node, expected access denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
|
||||||
|
head_object(default_wallet, container, object_id, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="allowHeadObject",
|
||||||
|
)
|
||||||
|
|
||||||
|
@allure.title("LocalOverride: Allow to SearchObject in root tenant")
|
||||||
|
def test_local_override_allow_to_search_object_root(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
container: str,
|
||||||
|
):
|
||||||
|
with reporter.step("Create local override on first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="allowSearchObject",
|
||||||
|
rule=f"allow Object.Search *",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check search object in container on the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
search_object(default_wallet, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check search object from container on the second node, expected access denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
|
||||||
|
search_object(default_wallet, container, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="allowSearchObject",
|
||||||
|
)
|
||||||
|
|
||||||
|
@allure.title("LocalOverride: Allow to RangeObject in root tenant")
|
||||||
|
def test_local_override_allow_to_range_object_root(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
container: str,
|
||||||
|
object_id: str,
|
||||||
|
):
|
||||||
|
with reporter.step("Create local override on first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="allowRangeObject",
|
||||||
|
rule=f"allow Object.Range *",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check get range object in container on the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
get_range(default_wallet, container, object_id, "0:10", self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check range object in container on the second node. expected access denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
|
||||||
|
get_range(default_wallet, container, object_id, "0:10", self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="allowRangeObject",
|
||||||
|
)
|
||||||
|
|
||||||
|
@allure.title("LocalOverride: Allow to HashObject in root tenant")
|
||||||
|
def test_local_override_allow_to_hash_object_root(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
container: str,
|
||||||
|
object_id: str,
|
||||||
|
):
|
||||||
|
with reporter.step("Create local override on first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="allowHashObject",
|
||||||
|
rule=f"allow Object.Hash *",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check get range hash object in container on the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
get_range_hash(default_wallet, container, object_id, "0:10", self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check get range hash object in container on the second node, expected access denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
|
||||||
|
get_range_hash(default_wallet, container, object_id, "0:10", self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="allowHashObject",
|
||||||
|
)
|
||||||
|
|
||||||
|
@allure.title("LocalOverride: Allow to DeleteObject in root tenant")
|
||||||
|
def test_local_override_allow_to_delete_object_root(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
container: str,
|
||||||
|
object_id: str,
|
||||||
|
):
|
||||||
|
with reporter.step("Create local override on first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="allowDeleteObject",
|
||||||
|
rule=f"allow Object.Head Object.Delete *",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check delete object from container on the second node, expected access denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
|
||||||
|
delete_object(default_wallet, container, object_id, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check delete object in container on the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
delete_object(default_wallet, container, object_id, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="allowDeleteObject",
|
||||||
|
)
|
333
pytest_tests/testsuites/ape/test_ape_local_object_deny.py
Normal file
333
pytest_tests/testsuites/ape/test_ape_local_object_deny.py
Normal file
|
@ -0,0 +1,333 @@
|
||||||
|
import allure
|
||||||
|
import pytest
|
||||||
|
from frostfs_testlib.cli import FrostfsCli
|
||||||
|
from frostfs_testlib.reporter import get_reporter
|
||||||
|
from frostfs_testlib.resources.error_patterns import OBJECT_ACCESS_DENIED, RULE_ACCESS_DENIED_OBJECT
|
||||||
|
from frostfs_testlib.steps.cli.object import delete_object, get_object, get_range, get_range_hash, head_object, put_object, search_object
|
||||||
|
from frostfs_testlib.storage.dataclasses.ape import Operations
|
||||||
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
|
from frostfs_testlib.testing.test_control import expect_not_raises
|
||||||
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
|
from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest
|
||||||
|
|
||||||
|
reporter = get_reporter()
|
||||||
|
|
||||||
|
REP2 = ContainerRequest("REP 2", ape_rules=APE_EVERYONE_ALLOW_ALL, short_name="REP2_allow_all_ape")
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.ape
|
||||||
|
@pytest.mark.ape_local
|
||||||
|
@pytest.mark.ape_object
|
||||||
|
@pytest.mark.ape_deny
|
||||||
|
class TestApeLocalOverrideDeny(ClusterTestBase):
|
||||||
|
@allure.title("LocalOverride: Deny to GetObject in root tenant")
|
||||||
|
@pytest.mark.parametrize("container_request", [REP2], indirect=True)
|
||||||
|
def test_local_override_deny_to_get_object_root(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
):
|
||||||
|
test_file = generate_file(simple_object_size.value)
|
||||||
|
|
||||||
|
with reporter.step("Create local override on first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="denyGetObject",
|
||||||
|
rule=f"deny Object.Get /{container}/*",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Put object in container on the first node"):
|
||||||
|
oid = put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check get object from container on the first node, expected access denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
|
||||||
|
get_object(default_wallet, container, oid, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check get object from container on the second node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
get_object(default_wallet, container, oid, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="denyGetObject",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check get object in container on the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
get_object(default_wallet, container, oid, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
@allure.title("LocalOverride: Deny to PutObject in root tenant")
|
||||||
|
@pytest.mark.parametrize("container_request", [REP2], indirect=True)
|
||||||
|
def test_local_override_deny_to_put_object_root(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
):
|
||||||
|
test_file = generate_file(simple_object_size.value)
|
||||||
|
|
||||||
|
with reporter.step("Create local override on first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="denyPutObject",
|
||||||
|
rule=f"deny Object.Put /{container}/*",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check put object from container on the first node, expected access denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=OBJECT_ACCESS_DENIED):
|
||||||
|
put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check put object from container on the second node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
put_object(
|
||||||
|
default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint(), copies_number=3
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="denyPutObject",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check get object in container on the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
@allure.title("LocalOverride: Deny to HeadObject in root tenant")
|
||||||
|
@pytest.mark.parametrize("container_request", [REP2], indirect=True)
|
||||||
|
def test_local_override_deny_to_head_object_root(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
):
|
||||||
|
test_file = generate_file(simple_object_size.value)
|
||||||
|
|
||||||
|
with reporter.step("Create local override on first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="denyHeadObject",
|
||||||
|
rule=f"deny Object.Head /{container}/*",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Put object in container on the first node"):
|
||||||
|
oid = put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check head object from container on the first node, expected access denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
|
||||||
|
head_object(default_wallet, container, oid, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check head object from container on the second node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
head_object(default_wallet, container, oid, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="denyHeadObject",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check head object in container on the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
head_object(default_wallet, container, oid, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
@allure.title("LocalOverride: Deny to SearchObject in root tenant")
|
||||||
|
@pytest.mark.parametrize("container_request", [REP2], indirect=True)
|
||||||
|
def test_local_override_deny_to_search_object_root(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
container: str,
|
||||||
|
):
|
||||||
|
with reporter.step("Create local override on first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="denySearchObject",
|
||||||
|
rule=f"deny Object.Search /{container}/*",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check search object from container on the first node, expected access denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT.format(operation=Operations.SEARCH_OBJECT)):
|
||||||
|
search_object(default_wallet, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check search object from container on the second node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
search_object(default_wallet, container, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="denySearchObject",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check search object in container on the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
search_object(default_wallet, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
@allure.title("LocalOverride: Deny to RangeObject in root tenant")
|
||||||
|
@pytest.mark.parametrize("container_request", [REP2], indirect=True)
|
||||||
|
def test_local_override_deny_to_range_object_root(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
):
|
||||||
|
test_file = generate_file(simple_object_size.value)
|
||||||
|
|
||||||
|
with reporter.step("Create local override on first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="denyRangeObject",
|
||||||
|
rule=f"deny Object.Range /{container}/*",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Put object in container on the first node"):
|
||||||
|
oid = put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check range object from container on the first node, expected access denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT.format(operation=Operations.RANGE_OBJECT)):
|
||||||
|
get_range(default_wallet, container, oid, "0:10", self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check get range object from container on the second node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
get_range(default_wallet, container, oid, "0:10", self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="denyRangeObject",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check get range object in container on the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
get_range(default_wallet, container, oid, "0:10", self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
@allure.title("LocalOverride: Deny to HashObject in root tenant")
|
||||||
|
@pytest.mark.parametrize("container_request", [REP2], indirect=True)
|
||||||
|
def test_local_override_deny_to_hash_object_root(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
):
|
||||||
|
test_file = generate_file(simple_object_size.value)
|
||||||
|
|
||||||
|
with reporter.step("Create local override on first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="denyHashObject",
|
||||||
|
rule=f"deny Object.Hash /{container}/*",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Put object in container on the first node"):
|
||||||
|
oid = put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check get range hash object from container on the first node, expected access denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT.format(operation=Operations.HASH_OBJECT)):
|
||||||
|
get_range_hash(default_wallet, container, oid, "0:10", self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check get range hash object from container on the second node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
get_range_hash(default_wallet, container, oid, "0:10", self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="denyHashObject",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check get range hash object in container on the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
get_range_hash(default_wallet, container, oid, "0:10", self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
@allure.title("LocalOverride: Deny to DeleteObject in root tenant")
|
||||||
|
@pytest.mark.parametrize("container_request", [REP2], indirect=True)
|
||||||
|
def test_local_override_deny_to_delete_object_root(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli_on_first_node: FrostfsCli,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
):
|
||||||
|
test_file = generate_file(simple_object_size.value)
|
||||||
|
|
||||||
|
with reporter.step("Create local override on first node"):
|
||||||
|
frostfs_cli_on_first_node.control.add_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="denyDeleteObject",
|
||||||
|
rule=f"deny Object.Delete /{container}/*",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Put objects in container on the first node"):
|
||||||
|
oid_1 = put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
oid_2 = put_object(default_wallet, test_file, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Search object in container on the first node"):
|
||||||
|
search_object_in_container_1 = search_object(
|
||||||
|
default_wallet, container, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint()
|
||||||
|
)
|
||||||
|
assert oid_1 in search_object_in_container_1, f"Object {oid_1} was not found"
|
||||||
|
assert oid_2 in search_object_in_container_1, f"Object {oid_2} was not found"
|
||||||
|
|
||||||
|
with reporter.step("Search object from container on the second node"):
|
||||||
|
search_object_in_container_2 = search_object(
|
||||||
|
default_wallet, container, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint()
|
||||||
|
)
|
||||||
|
assert oid_1 in search_object_in_container_2, f"Object {oid_1} was not found"
|
||||||
|
assert oid_2 in search_object_in_container_2, f"Object {oid_2} was not found"
|
||||||
|
|
||||||
|
with reporter.step("Check delete object from container on the first node, expected access denied error"):
|
||||||
|
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
|
||||||
|
delete_object(default_wallet, container, oid_1, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Check delete object from container on the second node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
delete_object(default_wallet, container, oid_2, self.shell, self.cluster.storage_nodes[1].get_rpc_endpoint())
|
||||||
|
|
||||||
|
with reporter.step("Delete a rule"):
|
||||||
|
frostfs_cli_on_first_node.control.remove_rule(
|
||||||
|
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
|
||||||
|
target_type="container",
|
||||||
|
target_name=container,
|
||||||
|
chain_id="denyDeleteObject",
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Check delete object in container on the first node, expected allow"):
|
||||||
|
with expect_not_raises():
|
||||||
|
delete_object(default_wallet, container, oid_1, self.shell, self.cluster.storage_nodes[0].get_rpc_endpoint())
|
|
@ -1,7 +1,5 @@
|
||||||
import logging
|
import logging
|
||||||
import os
|
|
||||||
import random
|
import random
|
||||||
import shutil
|
|
||||||
from datetime import datetime, timedelta, timezone
|
from datetime import datetime, timedelta, timezone
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
|
|
||||||
|
@ -14,11 +12,13 @@ from frostfs_testlib.credentials.interfaces import CredentialsProvider, User
|
||||||
from frostfs_testlib.healthcheck.interfaces import Healthcheck
|
from frostfs_testlib.healthcheck.interfaces import Healthcheck
|
||||||
from frostfs_testlib.hosting import Hosting
|
from frostfs_testlib.hosting import Hosting
|
||||||
from frostfs_testlib.resources import optionals
|
from frostfs_testlib.resources import optionals
|
||||||
from frostfs_testlib.resources.common import ASSETS_DIR, COMPLEX_OBJECT_CHUNKS_COUNT, COMPLEX_OBJECT_TAIL_SIZE, SIMPLE_OBJECT_SIZE
|
from frostfs_testlib.resources.common import COMPLEX_OBJECT_CHUNKS_COUNT, COMPLEX_OBJECT_TAIL_SIZE, SIMPLE_OBJECT_SIZE
|
||||||
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
|
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
|
||||||
|
from frostfs_testlib.s3.interfaces import BucketContainerResolver
|
||||||
from frostfs_testlib.shell import LocalShell, Shell
|
from frostfs_testlib.shell import LocalShell, Shell
|
||||||
from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE, FROSTFS_CLI_EXEC
|
from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE, FROSTFS_CLI_EXEC
|
||||||
from frostfs_testlib.steps.cli.object import get_netmap_netinfo
|
from frostfs_testlib.steps.cli.object import get_netmap_netinfo
|
||||||
|
from frostfs_testlib.steps.epoch import ensure_fresh_epoch
|
||||||
from frostfs_testlib.steps.s3 import s3_helper
|
from frostfs_testlib.steps.s3 import s3_helper
|
||||||
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
||||||
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
|
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
|
||||||
|
@ -26,19 +26,24 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
|
||||||
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
|
from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
|
from frostfs_testlib.storage.grpc_operations.client_wrappers import CliClientWrapper
|
||||||
|
from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.testing.parallel import parallel
|
from frostfs_testlib.testing.parallel import parallel
|
||||||
from frostfs_testlib.testing.test_control import run_optionally, wait_for_success
|
from frostfs_testlib.testing.test_control import cached_fixture, run_optionally, wait_for_success
|
||||||
from frostfs_testlib.utils import env_utils, string_utils, version_utils
|
from frostfs_testlib.utils import env_utils, string_utils, version_utils
|
||||||
from frostfs_testlib.utils.file_utils import TestFile, generate_file
|
from frostfs_testlib.utils.file_utils import TestFile, generate_file
|
||||||
|
|
||||||
from pytest_tests.resources.common import TEST_CYCLES_COUNT
|
from ..helpers.container_creation import create_container_with_ape, create_containers_with_ape
|
||||||
|
from ..helpers.container_request import EVERYONE_ALLOW_ALL, ContainerRequest, MultipleContainersRequest
|
||||||
|
from ..resources.common import TEST_CYCLES_COUNT
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
SERVICE_ACTIVE_TIME = 20
|
SERVICE_ACTIVE_TIME = 20
|
||||||
WALLTETS_IN_POOL = 2
|
WALLTETS_IN_POOL = 2
|
||||||
|
|
||||||
|
|
||||||
# Add logs check test even if it's not fit to mark selectors
|
# Add logs check test even if it's not fit to mark selectors
|
||||||
def pytest_configure(config: pytest.Config):
|
def pytest_configure(config: pytest.Config):
|
||||||
markers = config.option.markexpr
|
markers = config.option.markexpr
|
||||||
|
@ -49,6 +54,8 @@ def pytest_configure(config: pytest.Config):
|
||||||
number_key = pytest.StashKey[str]()
|
number_key = pytest.StashKey[str]()
|
||||||
start_time = pytest.StashKey[int]()
|
start_time = pytest.StashKey[int]()
|
||||||
test_outcome = pytest.StashKey[str]()
|
test_outcome = pytest.StashKey[str]()
|
||||||
|
|
||||||
|
|
||||||
# pytest hook. Do not rename
|
# pytest hook. Do not rename
|
||||||
def pytest_collection_modifyitems(items: list[pytest.Item]):
|
def pytest_collection_modifyitems(items: list[pytest.Item]):
|
||||||
# Change order of tests based on @pytest.mark.order(<int>) marker
|
# Change order of tests based on @pytest.mark.order(<int>) marker
|
||||||
|
@ -106,11 +113,7 @@ def pytest_generate_tests(metafunc: pytest.Metafunc):
|
||||||
return
|
return
|
||||||
|
|
||||||
metafunc.fixturenames.append("cycle")
|
metafunc.fixturenames.append("cycle")
|
||||||
metafunc.parametrize(
|
metafunc.parametrize("cycle", range(1, TEST_CYCLES_COUNT + 1), ids=[f"cycle {cycle}" for cycle in range(1, TEST_CYCLES_COUNT + 1)])
|
||||||
"cycle",
|
|
||||||
range(1, TEST_CYCLES_COUNT + 1),
|
|
||||||
ids=[f"cycle {cycle}" for cycle in range(1, TEST_CYCLES_COUNT + 1)],
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
@pytest.fixture(scope="session")
|
||||||
|
@ -139,19 +142,16 @@ def require_multiple_interfaces(cluster: Cluster):
|
||||||
interfaces = cluster.cluster_nodes[0].host.config.interfaces
|
interfaces = cluster.cluster_nodes[0].host.config.interfaces
|
||||||
if "internal1" not in interfaces or "data1" not in interfaces:
|
if "internal1" not in interfaces or "data1" not in interfaces:
|
||||||
pytest.skip("This test requires multiple internal and data interfaces")
|
pytest.skip("This test requires multiple internal and data interfaces")
|
||||||
yield
|
return
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
@pytest.fixture(scope="session")
|
||||||
|
@cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
|
||||||
def max_object_size(cluster: Cluster, client_shell: Shell) -> int:
|
def max_object_size(cluster: Cluster, client_shell: Shell) -> int:
|
||||||
storage_node = cluster.storage_nodes[0]
|
storage_node = cluster.storage_nodes[0]
|
||||||
wallet = WalletInfo.from_node(storage_node)
|
wallet = WalletInfo.from_node(storage_node)
|
||||||
net_info = get_netmap_netinfo(
|
net_info = get_netmap_netinfo(wallet=wallet, endpoint=storage_node.get_rpc_endpoint(), shell=client_shell)
|
||||||
wallet=wallet,
|
return net_info["maximum_object_size"]
|
||||||
endpoint=storage_node.get_rpc_endpoint(),
|
|
||||||
shell=client_shell,
|
|
||||||
)
|
|
||||||
yield net_info["maximum_object_size"]
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
@pytest.fixture(scope="session")
|
||||||
|
@ -160,11 +160,6 @@ def simple_object_size(max_object_size: int) -> ObjectSize:
|
||||||
return ObjectSize("simple", size)
|
return ObjectSize("simple", size)
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture()
|
|
||||||
def file_path(object_size: ObjectSize) -> TestFile:
|
|
||||||
return generate_file(object_size.value)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
@pytest.fixture(scope="session")
|
||||||
def complex_object_size(max_object_size: int) -> ObjectSize:
|
def complex_object_size(max_object_size: int) -> ObjectSize:
|
||||||
size = max_object_size * int(COMPLEX_OBJECT_CHUNKS_COUNT) + int(COMPLEX_OBJECT_TAIL_SIZE)
|
size = max_object_size * int(COMPLEX_OBJECT_CHUNKS_COUNT) + int(COMPLEX_OBJECT_TAIL_SIZE)
|
||||||
|
@ -174,8 +169,7 @@ def complex_object_size(max_object_size: int) -> ObjectSize:
|
||||||
# By default we want all tests to be executed with both object sizes
|
# By default we want all tests to be executed with both object sizes
|
||||||
# This can be overriden in choosen tests if needed
|
# This can be overriden in choosen tests if needed
|
||||||
@pytest.fixture(
|
@pytest.fixture(
|
||||||
scope="session",
|
scope="session", params=[pytest.param("simple", marks=pytest.mark.simple), pytest.param("complex", marks=pytest.mark.complex)]
|
||||||
params=[pytest.param("simple", marks=pytest.mark.simple), pytest.param("complex", marks=pytest.mark.complex)],
|
|
||||||
)
|
)
|
||||||
def object_size(simple_object_size: ObjectSize, complex_object_size: ObjectSize, request: pytest.FixtureRequest) -> ObjectSize:
|
def object_size(simple_object_size: ObjectSize, complex_object_size: ObjectSize, request: pytest.FixtureRequest) -> ObjectSize:
|
||||||
if request.param == "simple":
|
if request.param == "simple":
|
||||||
|
@ -184,6 +178,22 @@ def object_size(simple_object_size: ObjectSize, complex_object_size: ObjectSize,
|
||||||
return complex_object_size
|
return complex_object_size
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture()
|
||||||
|
def test_file(object_size: ObjectSize) -> TestFile:
|
||||||
|
return generate_file(object_size.value)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="module")
|
||||||
|
def test_file_module(object_size: ObjectSize) -> TestFile:
|
||||||
|
return generate_file(object_size.value)
|
||||||
|
|
||||||
|
|
||||||
|
# Deprecated. Please migrate all to test_file
|
||||||
|
@pytest.fixture()
|
||||||
|
def file_path(test_file: TestFile) -> TestFile:
|
||||||
|
return test_file
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
@pytest.fixture(scope="session")
|
||||||
def rep_placement_policy() -> PlacementPolicy:
|
def rep_placement_policy() -> PlacementPolicy:
|
||||||
return PlacementPolicy("rep", DEFAULT_PLACEMENT_RULE)
|
return PlacementPolicy("rep", DEFAULT_PLACEMENT_RULE)
|
||||||
|
@ -200,19 +210,24 @@ def frostfs_cli(client_shell: Shell, default_wallet: WalletInfo) -> FrostfsCli:
|
||||||
return FrostfsCli(client_shell, FROSTFS_CLI_EXEC, default_wallet.config_path)
|
return FrostfsCli(client_shell, FROSTFS_CLI_EXEC, default_wallet.config_path)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="session")
|
||||||
|
@allure.title("Init GrpcClientWrapper with local Frostfs CLI")
|
||||||
|
def grpc_client(frostfs_cli: FrostfsCli) -> GrpcClientWrapper:
|
||||||
|
return CliClientWrapper(frostfs_cli)
|
||||||
|
|
||||||
|
|
||||||
# By default we want all tests to be executed with both storage policies.
|
# By default we want all tests to be executed with both storage policies.
|
||||||
# This can be overriden in choosen tests if needed.
|
# This can be overriden in choosen tests if needed.
|
||||||
@pytest.fixture(
|
@pytest.fixture(scope="session", params=[pytest.param("rep", marks=pytest.mark.rep), pytest.param("ec", marks=pytest.mark.ec)])
|
||||||
scope="session",
|
|
||||||
params=[pytest.param("rep", marks=pytest.mark.rep), pytest.param("ec", marks=pytest.mark.ec)],
|
|
||||||
)
|
|
||||||
def placement_policy(
|
def placement_policy(
|
||||||
rep_placement_policy: PlacementPolicy, ec_placement_policy: PlacementPolicy, request: pytest.FixtureRequest
|
rep_placement_policy: PlacementPolicy, ec_placement_policy: PlacementPolicy, request: pytest.FixtureRequest
|
||||||
) -> PlacementPolicy:
|
) -> PlacementPolicy:
|
||||||
if request.param == "rep":
|
if request.param == "rep":
|
||||||
return rep_placement_policy
|
return rep_placement_policy
|
||||||
|
elif request.param == "ec":
|
||||||
|
return ec_placement_policy
|
||||||
|
|
||||||
return ec_placement_policy
|
return request.param
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
@pytest.fixture(scope="session")
|
||||||
|
@ -371,20 +386,6 @@ def collect_binary_versions(hosting: Hosting, client_shell: Shell, request: pyte
|
||||||
env_utils.save_env_properties(file_path, all_versions)
|
env_utils.save_env_properties(file_path, all_versions)
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Prepare tmp directory")
|
|
||||||
@pytest.fixture(scope="session")
|
|
||||||
def temp_directory(configure_testlib):
|
|
||||||
with reporter.step("Prepare tmp directory"):
|
|
||||||
full_path = os.path.join(os.getcwd(), ASSETS_DIR)
|
|
||||||
shutil.rmtree(full_path, ignore_errors=True)
|
|
||||||
os.mkdir(full_path)
|
|
||||||
|
|
||||||
yield full_path
|
|
||||||
|
|
||||||
with reporter.step("Remove tmp directory"):
|
|
||||||
shutil.rmtree(full_path)
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("[Autouse/Session] Test session start time")
|
@reporter.step("[Autouse/Session] Test session start time")
|
||||||
@pytest.fixture(scope="session", autouse=True)
|
@pytest.fixture(scope="session", autouse=True)
|
||||||
def session_start_time(configure_testlib):
|
def session_start_time(configure_testlib):
|
||||||
|
@ -400,6 +401,11 @@ def after_deploy_healthcheck(cluster: Cluster):
|
||||||
parallel(readiness_on_node, cluster.cluster_nodes)
|
parallel(readiness_on_node, cluster.cluster_nodes)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="session")
|
||||||
|
def rpc_endpoint(cluster: Cluster):
|
||||||
|
return cluster.default_rpc_endpoint
|
||||||
|
|
||||||
|
|
||||||
@wait_for_success(60 * SERVICE_ACTIVE_TIME * 3, 60, title="Wait for {cluster_node} readiness")
|
@wait_for_success(60 * SERVICE_ACTIVE_TIME * 3, 60, title="Wait for {cluster_node} readiness")
|
||||||
def readiness_on_node(cluster_node: ClusterNode):
|
def readiness_on_node(cluster_node: ClusterNode):
|
||||||
if "skip_readiness_check" in cluster_node.host.config.attributes and cluster_node.host.config.attributes["skip_readiness_check"]:
|
if "skip_readiness_check" in cluster_node.host.config.attributes and cluster_node.host.config.attributes["skip_readiness_check"]:
|
||||||
|
@ -427,6 +433,7 @@ def readiness_on_node(cluster_node: ClusterNode):
|
||||||
|
|
||||||
@reporter.step("Prepare default user with wallet")
|
@reporter.step("Prepare default user with wallet")
|
||||||
@pytest.fixture(scope="session")
|
@pytest.fixture(scope="session")
|
||||||
|
@cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
|
||||||
def default_user(credentials_provider: CredentialsProvider, cluster: Cluster) -> User:
|
def default_user(credentials_provider: CredentialsProvider, cluster: Cluster) -> User:
|
||||||
user = User(string_utils.unique_name("user-"))
|
user = User(string_utils.unique_name("user-"))
|
||||||
node = cluster.cluster_nodes[0]
|
node = cluster.cluster_nodes[0]
|
||||||
|
@ -443,6 +450,7 @@ def default_wallet(default_user: User) -> WalletInfo:
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
@pytest.fixture(scope="session")
|
||||||
|
@cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
|
||||||
def wallets_pool(credentials_provider: CredentialsProvider, cluster: Cluster) -> list[WalletInfo]:
|
def wallets_pool(credentials_provider: CredentialsProvider, cluster: Cluster) -> list[WalletInfo]:
|
||||||
users = [User(string_utils.unique_name("user-")) for _ in range(WALLTETS_IN_POOL)]
|
users = [User(string_utils.unique_name("user-")) for _ in range(WALLTETS_IN_POOL)]
|
||||||
parallel(credentials_provider.GRPC.provide, users, cluster_node=cluster.cluster_nodes[0])
|
parallel(credentials_provider.GRPC.provide, users, cluster_node=cluster.cluster_nodes[0])
|
||||||
|
@ -470,3 +478,79 @@ def node_under_test(cluster: Cluster) -> ClusterNode:
|
||||||
selected_node = random.choice(cluster.cluster_nodes)
|
selected_node = random.choice(cluster.cluster_nodes)
|
||||||
reporter.attach(f"{selected_node}", "Selected node")
|
reporter.attach(f"{selected_node}", "Selected node")
|
||||||
return selected_node
|
return selected_node
|
||||||
|
|
||||||
|
|
||||||
|
@allure.title("Init bucket container resolver")
|
||||||
|
@pytest.fixture()
|
||||||
|
def bucket_container_resolver(node_under_test: ClusterNode) -> BucketContainerResolver:
|
||||||
|
resolver_cls = plugins.load_plugin("frostfs.testlib.bucket_cid_resolver", node_under_test.host.config.product)
|
||||||
|
resolver: BucketContainerResolver = resolver_cls()
|
||||||
|
return resolver
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="session", params=[pytest.param(EVERYONE_ALLOW_ALL)])
|
||||||
|
def container_request(request: pytest.FixtureRequest) -> ContainerRequest:
|
||||||
|
if "param" in request.__dict__:
|
||||||
|
return request.param
|
||||||
|
|
||||||
|
container_marker = request.node.get_closest_marker("container")
|
||||||
|
# let default container to be public at the moment
|
||||||
|
container_request = EVERYONE_ALLOW_ALL
|
||||||
|
|
||||||
|
if container_marker:
|
||||||
|
if len(container_marker.args) != 1:
|
||||||
|
raise RuntimeError(f"Something wrong with container marker: {container_marker}")
|
||||||
|
container_request = container_marker.args[0]
|
||||||
|
|
||||||
|
if not container_request:
|
||||||
|
raise RuntimeError(
|
||||||
|
f"""Container specification is empty.
|
||||||
|
Add @pytest.mark.parametrize("container_request", [ContainerRequest(...)], indirect=True) decorator."""
|
||||||
|
)
|
||||||
|
|
||||||
|
return container_request
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="session")
|
||||||
|
def multiple_containers_request(request: pytest.FixtureRequest) -> ContainerRequest:
|
||||||
|
if "param" in request.__dict__:
|
||||||
|
return request.param
|
||||||
|
|
||||||
|
raise RuntimeError(
|
||||||
|
f"""Container specification is empty.
|
||||||
|
Add @pytest.mark.parametrize("container_requests", [[ContainerRequest(...), ..., ContainerRequest(...)]], indirect=True) decorator."""
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def container(
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli: FrostfsCli,
|
||||||
|
client_shell: Shell,
|
||||||
|
cluster: Cluster,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
container_request: ContainerRequest,
|
||||||
|
) -> str:
|
||||||
|
return create_container_with_ape(container_request, frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def containers(
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli: FrostfsCli,
|
||||||
|
client_shell: Shell,
|
||||||
|
cluster: Cluster,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
multiple_containers_request: MultipleContainersRequest,
|
||||||
|
) -> list[str]:
|
||||||
|
return create_containers_with_ape(frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint, multiple_containers_request)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture()
|
||||||
|
def new_epoch(client_shell: Shell, cluster: Cluster) -> int:
|
||||||
|
return ensure_fresh_epoch(client_shell, cluster)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="module")
|
||||||
|
def new_epoch_module_scope(client_shell: Shell, cluster: Cluster) -> int:
|
||||||
|
return ensure_fresh_epoch(client_shell, cluster)
|
||||||
|
|
0
pytest_tests/testsuites/container/__init__.py
Normal file
0
pytest_tests/testsuites/container/__init__.py
Normal file
|
@ -13,41 +13,30 @@ from frostfs_testlib.steps.cli.container import (
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
|
|
||||||
from pytest_tests.helpers.utility import placement_policy_from_container
|
from ...helpers.utility import placement_policy_from_container
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.container
|
@pytest.mark.nightly
|
||||||
@pytest.mark.sanity
|
@pytest.mark.sanity
|
||||||
|
@pytest.mark.container
|
||||||
class TestContainer(ClusterTestBase):
|
class TestContainer(ClusterTestBase):
|
||||||
|
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
|
||||||
|
|
||||||
@allure.title("Create container (name={name})")
|
@allure.title("Create container (name={name})")
|
||||||
@pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"])
|
@pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"])
|
||||||
@pytest.mark.smoke
|
@pytest.mark.smoke
|
||||||
def test_container_creation(self, default_wallet: WalletInfo, name: str):
|
def test_container_creation(self, default_wallet: WalletInfo, name: str, rpc_endpoint: str):
|
||||||
wallet = default_wallet
|
wallet = default_wallet
|
||||||
|
|
||||||
placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
|
cid = create_container(wallet, self.shell, rpc_endpoint, self.PLACEMENT_RULE, name=name)
|
||||||
cid = create_container(
|
|
||||||
wallet,
|
|
||||||
rule=placement_rule,
|
|
||||||
name=name,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
)
|
|
||||||
|
|
||||||
containers = list_containers(wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
|
containers = list_containers(wallet, self.shell, rpc_endpoint)
|
||||||
assert cid in containers, f"Expected container {cid} in containers: {containers}"
|
assert cid in containers, f"Expected container {cid} in containers: {containers}"
|
||||||
|
|
||||||
container_info: str = get_container(
|
container_info: str = get_container(wallet, cid, self.shell, rpc_endpoint, False)
|
||||||
wallet,
|
|
||||||
cid,
|
|
||||||
json_mode=False,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
)
|
|
||||||
container_info = container_info.casefold() # To ignore case when comparing with expected values
|
container_info = container_info.casefold() # To ignore case when comparing with expected values
|
||||||
|
|
||||||
info_to_check = {
|
info_to_check = {
|
||||||
f"basic ACL: {PRIVATE_ACL_F} (private)",
|
|
||||||
f"owner ID: {wallet.get_address_from_json(0)}",
|
f"owner ID: {wallet.get_address_from_json(0)}",
|
||||||
f"CID: {cid}",
|
f"CID: {cid}",
|
||||||
}
|
}
|
||||||
|
@ -55,7 +44,7 @@ class TestContainer(ClusterTestBase):
|
||||||
info_to_check.add(f"Name={name}")
|
info_to_check.add(f"Name={name}")
|
||||||
|
|
||||||
with reporter.step("Check container has correct information"):
|
with reporter.step("Check container has correct information"):
|
||||||
expected_policy = placement_rule.casefold()
|
expected_policy = self.PLACEMENT_RULE.casefold()
|
||||||
actual_policy = placement_policy_from_container(container_info)
|
actual_policy = placement_policy_from_container(container_info)
|
||||||
assert actual_policy == expected_policy, f"Expected policy\n{expected_policy} but got policy\n{actual_policy}"
|
assert actual_policy == expected_policy, f"Expected policy\n{expected_policy} but got policy\n{actual_policy}"
|
||||||
|
|
||||||
|
@ -64,50 +53,42 @@ class TestContainer(ClusterTestBase):
|
||||||
assert expected_info in container_info, f"Expected {expected_info} in container info:\n{container_info}"
|
assert expected_info in container_info, f"Expected {expected_info} in container info:\n{container_info}"
|
||||||
|
|
||||||
with reporter.step("Delete container and check it was deleted"):
|
with reporter.step("Delete container and check it was deleted"):
|
||||||
delete_container(
|
# Force to skip frostfs-cli verifictions before delete.
|
||||||
wallet,
|
# Because no APE rules assigned to container, those verifications will fail due to APE requests denial.
|
||||||
cid,
|
delete_container(wallet, cid, self.shell, rpc_endpoint, force=True, await_mode=True)
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
await_mode=True,
|
|
||||||
)
|
|
||||||
self.tick_epoch()
|
self.tick_epoch()
|
||||||
wait_for_container_deletion(wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
|
wait_for_container_deletion(wallet, cid, self.shell, rpc_endpoint)
|
||||||
|
|
||||||
|
@allure.title("Delete container without force (name={name})")
|
||||||
|
@pytest.mark.smoke
|
||||||
|
def test_container_deletion_no_force(self, container: str, default_wallet: WalletInfo, rpc_endpoint: str):
|
||||||
|
with reporter.step("Delete container and check it was deleted"):
|
||||||
|
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=True)
|
||||||
|
self.tick_epoch()
|
||||||
|
wait_for_container_deletion(default_wallet, container, self.shell, rpc_endpoint)
|
||||||
|
|
||||||
@allure.title("Parallel container creation and deletion")
|
@allure.title("Parallel container creation and deletion")
|
||||||
def test_container_creation_deletion_parallel(self, default_wallet: WalletInfo):
|
def test_container_creation_deletion_parallel(self, default_wallet: WalletInfo, rpc_endpoint: str):
|
||||||
containers_count = 3
|
containers_count = 3
|
||||||
wallet = default_wallet
|
wallet = default_wallet
|
||||||
placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
|
|
||||||
iteration_count = 10
|
iteration_count = 10
|
||||||
|
|
||||||
for iteration in range(iteration_count):
|
for _ in range(iteration_count):
|
||||||
cids: list[str] = []
|
cids: list[str] = []
|
||||||
with reporter.step(f"Create {containers_count} containers"):
|
with reporter.step(f"Create {containers_count} containers"):
|
||||||
for _ in range(containers_count):
|
for _ in range(containers_count):
|
||||||
cids.append(
|
cids.append(
|
||||||
create_container(
|
create_container(wallet, self.shell, rpc_endpoint, self.PLACEMENT_RULE, await_mode=False, wait_for_creation=False)
|
||||||
wallet,
|
|
||||||
rule=placement_rule,
|
|
||||||
await_mode=False,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
wait_for_creation=False,
|
|
||||||
)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Wait for containers occur in container list"):
|
with reporter.step("Wait for containers occur in container list"):
|
||||||
for cid in cids:
|
for cid in cids:
|
||||||
wait_for_container_creation(
|
wait_for_container_creation(wallet, cid, self.shell, rpc_endpoint, sleep_interval=containers_count)
|
||||||
wallet,
|
|
||||||
cid,
|
|
||||||
sleep_interval=containers_count,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step("Delete containers and check they were deleted"):
|
with reporter.step("Delete containers and check they were deleted"):
|
||||||
for cid in cids:
|
for cid in cids:
|
||||||
delete_container(wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, await_mode=True)
|
# Force to skip frostfs-cli verifictions before delete.
|
||||||
containers_list = list_containers(wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
|
# Because no APE rules assigned to container, those verifications will fail due to APE requests denial.
|
||||||
|
delete_container(wallet, cid, self.shell, rpc_endpoint, force=True, await_mode=True)
|
||||||
|
containers_list = list_containers(wallet, self.shell, rpc_endpoint)
|
||||||
assert cid not in containers_list, "Container not deleted"
|
assert cid not in containers_list, "Container not deleted"
|
||||||
|
|
File diff suppressed because it is too large
Load diff
640
pytest_tests/testsuites/container/test_policy_with_price.py
Normal file
640
pytest_tests/testsuites/container/test_policy_with_price.py
Normal file
|
@ -0,0 +1,640 @@
|
||||||
|
import itertools
|
||||||
|
|
||||||
|
import allure
|
||||||
|
import pytest
|
||||||
|
from frostfs_testlib import reporter
|
||||||
|
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
|
||||||
|
from frostfs_testlib.shell.interfaces import Shell
|
||||||
|
from frostfs_testlib.steps.cli.container import delete_container
|
||||||
|
from frostfs_testlib.steps.cli.object import delete_object, put_object_to_random_node
|
||||||
|
from frostfs_testlib.steps.node_management import get_netmap_snapshot
|
||||||
|
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
|
||||||
|
from frostfs_testlib.storage.cluster import Cluster
|
||||||
|
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
|
||||||
|
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
|
||||||
|
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
|
||||||
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
|
from frostfs_testlib.testing import parallel
|
||||||
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
|
from frostfs_testlib.testing.test_control import wait_for_success
|
||||||
|
from frostfs_testlib.utils.cli_utils import parse_netmap_output
|
||||||
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
|
from ...helpers.container_creation import create_container_with_ape
|
||||||
|
from ...helpers.container_request import PUBLIC_WITH_POLICY, ContainerRequest
|
||||||
|
from ...helpers.policy_validation import get_netmap_param, validate_object_policy
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.weekly
|
||||||
|
@pytest.mark.policy
|
||||||
|
@pytest.mark.policy_price
|
||||||
|
class TestPolicyWithPrice(ClusterTestBase):
|
||||||
|
@wait_for_success(1200, 60, title="Wait for full field price on node", expected_result=True)
|
||||||
|
def await_for_price_attribute_on_nodes(self):
|
||||||
|
netmap = parse_netmap_output(get_netmap_snapshot(node=self.cluster.storage_nodes[0], shell=self.shell))
|
||||||
|
netmap = get_netmap_param(netmap)
|
||||||
|
for node in self.cluster.storage_nodes:
|
||||||
|
node_address = node.get_rpc_endpoint().split(":")[0]
|
||||||
|
if netmap[node_address]["Price"] is None:
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
@pytest.fixture(scope="module")
|
||||||
|
def fill_field_price(self, cluster: Cluster, cluster_state_controller_session: ClusterStateController):
|
||||||
|
prices = ["15", "10", "65", "55"]
|
||||||
|
|
||||||
|
config_manager = cluster_state_controller_session.manager(ConfigStateManager)
|
||||||
|
parallel(
|
||||||
|
config_manager.set_on_node,
|
||||||
|
cluster.cluster_nodes,
|
||||||
|
StorageNode,
|
||||||
|
itertools.cycle([{"node:attribute_5": f"Price:{price}"} for price in prices]),
|
||||||
|
)
|
||||||
|
cluster_state_controller_session.wait_after_storage_startup()
|
||||||
|
|
||||||
|
self.tick_epoch()
|
||||||
|
self.await_for_price_attribute_on_nodes()
|
||||||
|
|
||||||
|
yield
|
||||||
|
|
||||||
|
cluster_state_controller_session.manager(ConfigStateManager).revert_all()
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def container(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli: FrostfsCli,
|
||||||
|
client_shell: Shell,
|
||||||
|
cluster: Cluster,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
container_request: ContainerRequest,
|
||||||
|
# In these set of tests containers should be created after the fill_field_price fixture
|
||||||
|
fill_field_price,
|
||||||
|
) -> str:
|
||||||
|
return create_container_with_ape(container_request, frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint)
|
||||||
|
|
||||||
|
@allure.title("Policy with SELECT and FILTER results with 25% of available nodes")
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"container_request",
|
||||||
|
[PUBLIC_WITH_POLICY("REP 1 IN Nodes25 SELECT 1 FROM LE10 AS Nodes25 FILTER Price LE 10 AS LE10")],
|
||||||
|
indirect=True,
|
||||||
|
)
|
||||||
|
def test_policy_with_select_and_filter_results_with_25_of_available_nodes(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
container_request: ContainerRequest,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
This test checks object's copies based on container's placement policy with SELECT and FILTER results with 25% of available nodes.
|
||||||
|
"""
|
||||||
|
placement_params = {"Price": 10}
|
||||||
|
file_path = generate_file(simple_object_size.value)
|
||||||
|
expected_copies = 1
|
||||||
|
|
||||||
|
with reporter.step(f"Check container policy"):
|
||||||
|
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Put object in container"):
|
||||||
|
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
|
||||||
|
|
||||||
|
with reporter.step(f"Check object expected copies"):
|
||||||
|
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
|
||||||
|
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
|
||||||
|
|
||||||
|
with reporter.step(f"Check the object appearance"):
|
||||||
|
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
|
||||||
|
netmap = get_netmap_param(netmap)
|
||||||
|
node_address = resulting_copies[0].get_rpc_endpoint().split(":")[0]
|
||||||
|
with reporter.step(f"Check the node is selected with price <= {placement_params['Price']}"):
|
||||||
|
assert (
|
||||||
|
int(netmap[node_address]["Price"]) <= placement_params["Price"]
|
||||||
|
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the object from the container"):
|
||||||
|
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the container"):
|
||||||
|
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
|
||||||
|
|
||||||
|
@allure.title("Policy with select and complex filter results with 25% of available nodes")
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"container_request",
|
||||||
|
[
|
||||||
|
PUBLIC_WITH_POLICY(
|
||||||
|
"REP 1 IN Nodes25 SELECT 1 FROM BET0AND10 AS Nodes25 FILTER Price LE 10 AS LE10 FILTER Price GT 0 AS GT0 FILTER @LE10 AND @GT0 AS BET0AND10"
|
||||||
|
)
|
||||||
|
],
|
||||||
|
indirect=True,
|
||||||
|
)
|
||||||
|
def test_policy_with_select_and_complex_filter_results_with_25_of_available_nodes(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
container_request: ContainerRequest,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
This test checks object's copies based on container's placement policy with SELECT and Complex FILTER results with 25% of available nodes.
|
||||||
|
"""
|
||||||
|
placement_params = {"Price": [10, 0]}
|
||||||
|
file_path = generate_file(simple_object_size.value)
|
||||||
|
expected_copies = 1
|
||||||
|
|
||||||
|
with reporter.step(f"Check container policy"):
|
||||||
|
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Put object in container"):
|
||||||
|
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
|
||||||
|
|
||||||
|
with reporter.step(f"Check object expected copies"):
|
||||||
|
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
|
||||||
|
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
|
||||||
|
|
||||||
|
with reporter.step(f"Check the object appearance"):
|
||||||
|
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
|
||||||
|
netmap = get_netmap_param(netmap)
|
||||||
|
with reporter.step(f"Check the node is selected with price between 1 and 10"):
|
||||||
|
for node in resulting_copies:
|
||||||
|
node_address = node.get_rpc_endpoint().split(":")[0]
|
||||||
|
assert (
|
||||||
|
int(netmap[node_address]["Price"]) > placement_params["Price"][1]
|
||||||
|
and int(netmap[node_address]["Price"]) <= placement_params["Price"][0]
|
||||||
|
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the object from the container"):
|
||||||
|
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the container"):
|
||||||
|
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
|
||||||
|
|
||||||
|
@allure.title("Policy with Multi SELECTs and FILTERs results with 25% of available nodes")
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"container_request",
|
||||||
|
[
|
||||||
|
PUBLIC_WITH_POLICY(
|
||||||
|
"UNIQUE REP 1 IN One REP 1 IN One CBF 1 SELECT 1 FROM MINMAX AS One FILTER Price LT 15 AS LT15 FILTER Price GT 55 AS GT55 FILTER @LT15 OR @GT55 AS MINMAX"
|
||||||
|
)
|
||||||
|
],
|
||||||
|
indirect=True,
|
||||||
|
)
|
||||||
|
def test_policy_with_multi_selects_and_filters_results_with_25_of_available_nodes(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
container_request: ContainerRequest,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
This test checks object's copies based on container's placement policy with Multi SELECTs and FILTERs results with 25% of available nodes.
|
||||||
|
"""
|
||||||
|
placement_params = {"Price": [15, 55]}
|
||||||
|
file_path = generate_file(simple_object_size.value)
|
||||||
|
expected_copies = 2
|
||||||
|
|
||||||
|
with reporter.step(f"Check container policy"):
|
||||||
|
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Put object in container"):
|
||||||
|
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
|
||||||
|
|
||||||
|
with reporter.step(f"Check object expected copies"):
|
||||||
|
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
|
||||||
|
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
|
||||||
|
|
||||||
|
with reporter.step(f"Check the object appearance"):
|
||||||
|
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
|
||||||
|
netmap = get_netmap_param(netmap)
|
||||||
|
with reporter.step(f"Check two nodes are selected with max and min prices"):
|
||||||
|
for node in resulting_copies:
|
||||||
|
node_address = node.get_rpc_endpoint().split(":")[0]
|
||||||
|
assert (
|
||||||
|
int(netmap[node_address]["Price"]) > placement_params["Price"][1]
|
||||||
|
or int(netmap[node_address]["Price"]) < placement_params["Price"][0]
|
||||||
|
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the object from the container"):
|
||||||
|
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the container"):
|
||||||
|
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
|
||||||
|
|
||||||
|
@allure.title("Policy with SELECT and FILTER results with 50% of available nodes")
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"container_request",
|
||||||
|
[PUBLIC_WITH_POLICY("REP 2 IN HALF CBF 1 SELECT 2 FROM GT15 AS HALF FILTER Price GT 15 AS GT15")],
|
||||||
|
indirect=True,
|
||||||
|
)
|
||||||
|
def test_policy_with_select_and_filter_results_with_50_of_available_nodes(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
container_request: ContainerRequest,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
This test checks object's copies based on container's placement policy with SELECT and FILTER results with 50% of available nodes.
|
||||||
|
"""
|
||||||
|
placement_params = {"Price": 15}
|
||||||
|
file_path = generate_file(simple_object_size.value)
|
||||||
|
expected_copies = 2
|
||||||
|
|
||||||
|
with reporter.step(f"Check container policy"):
|
||||||
|
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Put object in container"):
|
||||||
|
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
|
||||||
|
|
||||||
|
with reporter.step(f"Check object expected copies"):
|
||||||
|
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
|
||||||
|
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
|
||||||
|
|
||||||
|
with reporter.step(f"Check the object appearance"):
|
||||||
|
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
|
||||||
|
netmap = get_netmap_param(netmap)
|
||||||
|
with reporter.step(f"Check two nodes are selected with price > {placement_params['Price']}"):
|
||||||
|
for node in resulting_copies:
|
||||||
|
node_address = node.get_rpc_endpoint().split(":")[0]
|
||||||
|
assert (
|
||||||
|
int(netmap[node_address]["Price"]) > placement_params["Price"]
|
||||||
|
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the object from the container"):
|
||||||
|
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the container"):
|
||||||
|
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
|
||||||
|
|
||||||
|
@allure.title("Policy with SELECT and Complex FILTER results with 50% of available nodes")
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"container_request",
|
||||||
|
[
|
||||||
|
PUBLIC_WITH_POLICY(
|
||||||
|
"REP 2 IN HALF CBF 2 SELECT 2 FROM GE15 AS HALF FILTER CountryCode NE RU AS NOTRU FILTER @NOTRU AND Price GE 15 AS GE15"
|
||||||
|
)
|
||||||
|
],
|
||||||
|
indirect=True,
|
||||||
|
)
|
||||||
|
def test_policy_with_select_and_complex_filter_results_with_50_of_available_nodes(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
container_request: ContainerRequest,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
This test checks object's copies based on container's placement policy with SELECT and Complex FILTER results with 50% of available nodes.
|
||||||
|
"""
|
||||||
|
placement_params = {"Price": 15, "country_code": "RU"}
|
||||||
|
file_path = generate_file(simple_object_size.value)
|
||||||
|
expected_copies = 2
|
||||||
|
|
||||||
|
with reporter.step(f"Check container policy"):
|
||||||
|
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Put object in container"):
|
||||||
|
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
|
||||||
|
|
||||||
|
with reporter.step(f"Check object expected copies"):
|
||||||
|
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
|
||||||
|
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
|
||||||
|
|
||||||
|
with reporter.step(f"Check the object appearance"):
|
||||||
|
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
|
||||||
|
netmap = get_netmap_param(netmap)
|
||||||
|
with reporter.step(f"Check two nodes are selected not with country code '{placement_params['country_code']}'"):
|
||||||
|
for node in resulting_copies:
|
||||||
|
node_address = node.get_rpc_endpoint().split(":")[0]
|
||||||
|
assert (
|
||||||
|
not netmap[node_address]["country_code"] == placement_params["country_code"]
|
||||||
|
or not netmap[node_address]["country_code"] == placement_params["country_code"]
|
||||||
|
and int(netmap[node_address]["Price"]) >= placement_params["Price"]
|
||||||
|
), f"The node is selected with the wrong price or country code. Got {netmap[node_address]}"
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the object from the container"):
|
||||||
|
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the container"):
|
||||||
|
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
|
||||||
|
|
||||||
|
@allure.title("Policy with Multi SELECTs and FILTERs results with 50% of available nodes")
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"container_request",
|
||||||
|
[
|
||||||
|
PUBLIC_WITH_POLICY(
|
||||||
|
"REP 2 IN FH REP 1 IN SH CBF 2 SELECT 2 FROM LE55 AS FH SELECT 2 FROM GE15 AS SH FILTER 'UN-LOCODE' EQ 'RU LED' OR 'UN-LOCODE' EQ 'RU MOW' AS RU FILTER NOT (@RU) AS NOTRU FILTER @NOTRU AND Price GE 15 AS GE15 FILTER @RU AND Price LE 55 AS LE55"
|
||||||
|
)
|
||||||
|
],
|
||||||
|
indirect=True,
|
||||||
|
)
|
||||||
|
def test_policy_with_multi_selects_and_filters_results_with_50_of_available_nodes(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
container_request: ContainerRequest,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
This test checks object's copies based on container's placement policy with Multi SELECTs and FILTERs results with 50% of available nodes.
|
||||||
|
"""
|
||||||
|
placement_params = {"un_locode": ["RU LED", "RU MOW"], "Price": [15, 55]}
|
||||||
|
file_path = generate_file(simple_object_size.value)
|
||||||
|
expected_copies = 3
|
||||||
|
|
||||||
|
with reporter.step(f"Check container policy"):
|
||||||
|
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Put object in container"):
|
||||||
|
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
|
||||||
|
|
||||||
|
with reporter.step(f"Check object expected copies"):
|
||||||
|
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
|
||||||
|
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
|
||||||
|
|
||||||
|
with reporter.step(f"Check the object appearance"):
|
||||||
|
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
|
||||||
|
netmap = get_netmap_param(netmap)
|
||||||
|
with reporter.step(f"Check all nodes are selected"):
|
||||||
|
for node in resulting_copies:
|
||||||
|
node_address = node.get_rpc_endpoint().split(":")[0]
|
||||||
|
assert (
|
||||||
|
netmap[node_address]["un_locode"] in placement_params["un_locode"]
|
||||||
|
or not netmap[node_address]["un_locode"] == placement_params["un_locode"][1]
|
||||||
|
or (
|
||||||
|
not netmap[node_address]["un_locode"] == placement_params["un_locode"][1]
|
||||||
|
and int(netmap[node_address]["Price"]) >= placement_params["Price"][0]
|
||||||
|
)
|
||||||
|
or (
|
||||||
|
netmap[node_address]["un_locode"] == placement_params["un_locode"][1]
|
||||||
|
and int(netmap[node_address]["Price"]) <= placement_params["Price"][1]
|
||||||
|
)
|
||||||
|
), f"The node is selected with the wrong price or un_locode. Expected {placement_params} and got {netmap[node_address]}"
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the object from the container"):
|
||||||
|
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the container"):
|
||||||
|
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
|
||||||
|
|
||||||
|
@allure.title("Policy with SELECT and FILTER results with 75% of available nodes")
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"container_request",
|
||||||
|
[PUBLIC_WITH_POLICY("REP 2 IN NODES75 SELECT 2 FROM LT65 AS NODES75 FILTER Price LT 65 AS LT65")],
|
||||||
|
indirect=True,
|
||||||
|
)
|
||||||
|
def test_policy_with_select_and_filter_results_with_75_of_available_nodes(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
container_request: ContainerRequest,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
This test checks object's copies based on container's placement policy with SELECT and FILTER results with 75% of available nodes.
|
||||||
|
"""
|
||||||
|
placement_params = {"Price": 65}
|
||||||
|
file_path = generate_file(simple_object_size.value)
|
||||||
|
expected_copies = 2
|
||||||
|
|
||||||
|
with reporter.step(f"Check container policy"):
|
||||||
|
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Put object in container"):
|
||||||
|
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
|
||||||
|
|
||||||
|
with reporter.step(f"Check object expected copies"):
|
||||||
|
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
|
||||||
|
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
|
||||||
|
|
||||||
|
with reporter.step(f"Check the object appearance"):
|
||||||
|
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
|
||||||
|
netmap = get_netmap_param(netmap)
|
||||||
|
with reporter.step(f"Check two nodes are selected with price < {placement_params['Price']}"):
|
||||||
|
for node in resulting_copies:
|
||||||
|
node_address = node.get_rpc_endpoint().split(":")[0]
|
||||||
|
assert (
|
||||||
|
int(netmap[node_address]["Price"]) < placement_params["Price"]
|
||||||
|
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the object from the container"):
|
||||||
|
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the container"):
|
||||||
|
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
|
||||||
|
|
||||||
|
@allure.title("Policy with SELECT and Complex FILTER results with 75% of available nodes")
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"container_request",
|
||||||
|
[
|
||||||
|
PUBLIC_WITH_POLICY(
|
||||||
|
"REP 2 IN NODES75 SELECT 2 FROM LT65 AS NODES75 FILTER Continent NE America AS NOAM FILTER @NOAM AND Price LT 65 AS LT65"
|
||||||
|
)
|
||||||
|
],
|
||||||
|
indirect=True,
|
||||||
|
)
|
||||||
|
def test_policy_with_select_and_complex_filter_results_with_75_of_available_nodes(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
container_request: ContainerRequest,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
This test checks object's copies based on container's placement policy with SELECT and Complex FILTER results with 75% of available nodes.
|
||||||
|
"""
|
||||||
|
placement_params = {"Price": 65, "continent": "America"}
|
||||||
|
file_path = generate_file(simple_object_size.value)
|
||||||
|
expected_copies = 2
|
||||||
|
|
||||||
|
with reporter.step(f"Check container policy"):
|
||||||
|
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Put object in container"):
|
||||||
|
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
|
||||||
|
|
||||||
|
with reporter.step(f"Check object expected copies"):
|
||||||
|
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
|
||||||
|
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
|
||||||
|
|
||||||
|
with reporter.step(f"Check the object appearance"):
|
||||||
|
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
|
||||||
|
netmap = get_netmap_param(netmap)
|
||||||
|
with reporter.step(f"Check three nodes are selected not from {placement_params['continent']}"):
|
||||||
|
for node in resulting_copies:
|
||||||
|
node_address = node.get_rpc_endpoint().split(":")[0]
|
||||||
|
assert (
|
||||||
|
int(netmap[node_address]["Price"]) < placement_params["Price"]
|
||||||
|
and not netmap[node_address]["continent"] == placement_params["continent"]
|
||||||
|
) or (
|
||||||
|
not netmap[node_address]["continent"] == placement_params["continent"]
|
||||||
|
), f"The node is selected with the wrong price or continent. Got {netmap[node_address]}"
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the object from the container"):
|
||||||
|
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the container"):
|
||||||
|
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
|
||||||
|
|
||||||
|
@allure.title("Policy with Multi SELECTs and FILTERs results with 75% of available nodes")
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"container_request",
|
||||||
|
[
|
||||||
|
PUBLIC_WITH_POLICY(
|
||||||
|
"REP 3 IN EXPNSV REP 3 IN CHEAP SELECT 3 FROM GT10 AS EXPNSV SELECT 3 FROM LT65 AS CHEAP FILTER NOT (Continent EQ America) AS NOAM FILTER @NOAM AND Price LT 65 AS LT65 FILTER @NOAM AND Price GT 10 AS GT10"
|
||||||
|
)
|
||||||
|
],
|
||||||
|
indirect=True,
|
||||||
|
)
|
||||||
|
def test_policy_with_multi_selects_and_filters_results_with_75_of_available_nodes(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
container_request: ContainerRequest,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
This test checks object's copies based on container's placement policy with Multi SELECTs and FILTERs results with 75% of available nodes.
|
||||||
|
"""
|
||||||
|
placement_params = {"Price": [65, 10], "continent": "America"}
|
||||||
|
file_path = generate_file(simple_object_size.value)
|
||||||
|
expected_copies = 4
|
||||||
|
|
||||||
|
with reporter.step(f"Check container policy"):
|
||||||
|
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Put object in container"):
|
||||||
|
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
|
||||||
|
|
||||||
|
with reporter.step(f"Check object expected copies"):
|
||||||
|
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
|
||||||
|
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
|
||||||
|
|
||||||
|
with reporter.step(f"Check the object appearance"):
|
||||||
|
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
|
||||||
|
netmap = get_netmap_param(netmap)
|
||||||
|
with reporter.step(f"Check all nodes are selected"):
|
||||||
|
for node in resulting_copies:
|
||||||
|
node_address = node.get_rpc_endpoint().split(":")[0]
|
||||||
|
assert (
|
||||||
|
(
|
||||||
|
int(netmap[node_address]["Price"]) > placement_params["Price"][1]
|
||||||
|
and not netmap[node_address]["continent"] == placement_params["continent"]
|
||||||
|
)
|
||||||
|
or (
|
||||||
|
int(netmap[node_address]["Price"]) < placement_params["Price"][0]
|
||||||
|
and not netmap[node_address]["continent"] == placement_params["continent"]
|
||||||
|
)
|
||||||
|
or not (netmap[node_address]["continent"] == placement_params["continent"])
|
||||||
|
), f"The node is selected with the wrong price or continent. Got {netmap[node_address]}"
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the object from the container"):
|
||||||
|
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the container"):
|
||||||
|
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
|
||||||
|
|
||||||
|
@allure.title("Policy with SELECT and FILTER results with 100% of available nodes")
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"container_request", [PUBLIC_WITH_POLICY("REP 1 IN All SELECT 4 FROM AllNodes AS All FILTER Price GE 0 AS AllNodes")], indirect=True
|
||||||
|
)
|
||||||
|
def test_policy_with_select_and_filter_results_with_100_of_available_nodes(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
container_request: ContainerRequest,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
This test checks object's copies based on container's placement policy with SELECT and FILTER results with 100% of available nodes.
|
||||||
|
"""
|
||||||
|
placement_params = {"Price": 0}
|
||||||
|
file_path = generate_file(simple_object_size.value)
|
||||||
|
expected_copies = 1
|
||||||
|
|
||||||
|
with reporter.step(f"Check container policy"):
|
||||||
|
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Put object in container"):
|
||||||
|
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
|
||||||
|
|
||||||
|
with reporter.step(f"Check object expected copies"):
|
||||||
|
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
|
||||||
|
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
|
||||||
|
|
||||||
|
with reporter.step(f"Check the object appearance"):
|
||||||
|
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
|
||||||
|
netmap = get_netmap_param(netmap)
|
||||||
|
node_address = resulting_copies[0].get_rpc_endpoint().split(":")[0]
|
||||||
|
with reporter.step(f"Check the node is selected with price >= {placement_params['Price']}"):
|
||||||
|
assert (
|
||||||
|
int(netmap[node_address]["Price"]) >= placement_params["Price"]
|
||||||
|
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the object from the container"):
|
||||||
|
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the container"):
|
||||||
|
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
|
||||||
|
|
||||||
|
@allure.title("Policy with Multi SELECTs and FILTERs results with 100% of available nodes")
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"container_request",
|
||||||
|
[
|
||||||
|
PUBLIC_WITH_POLICY(
|
||||||
|
"REP 4 IN AllOne REP 4 IN AllTwo CBF 4 SELECT 2 FROM GEZero AS AllOne SELECT 2 FROM AllCountries AS AllTwo FILTER Country EQ Russia OR Country EQ Sweden OR Country EQ Finland AS AllCountries FILTER Price GE 0 AS GEZero"
|
||||||
|
)
|
||||||
|
],
|
||||||
|
indirect=True,
|
||||||
|
)
|
||||||
|
def test_policy_with_multi_selects_and_filters_results_with_100_of_available_nodes(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
|
container_request: ContainerRequest,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
This test checks object's copies based on container's placement policy with Multi SELECTs and FILTERs results with 100% of available nodes.
|
||||||
|
"""
|
||||||
|
placement_params = {"country": ["Russia", "Sweden", "Finland"], "Price": 0}
|
||||||
|
file_path = generate_file(simple_object_size.value)
|
||||||
|
expected_copies = 4
|
||||||
|
|
||||||
|
with reporter.step(f"Check container policy"):
|
||||||
|
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Put object in container"):
|
||||||
|
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
|
||||||
|
|
||||||
|
with reporter.step(f"Check object expected copies"):
|
||||||
|
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
|
||||||
|
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
|
||||||
|
|
||||||
|
with reporter.step(f"Check the object appearance"):
|
||||||
|
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
|
||||||
|
netmap = get_netmap_param(netmap)
|
||||||
|
with reporter.step(f"Check all node are selected"):
|
||||||
|
for node in resulting_copies:
|
||||||
|
node_address = node.get_rpc_endpoint().split(":")[0]
|
||||||
|
assert (netmap[node_address]["country"] in placement_params["country"]) or (
|
||||||
|
int(netmap[node_address]["Price"]) >= placement_params["Price"]
|
||||||
|
), f"The node is selected from the wrong country or with wrong price. Got {netmap[node_address]}"
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the object from the container"):
|
||||||
|
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Delete the container"):
|
||||||
|
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
|
0
pytest_tests/testsuites/failovers/__init__.py
Normal file
0
pytest_tests/testsuites/failovers/__init__.py
Normal file
|
@ -6,11 +6,11 @@ import random
|
||||||
import allure
|
import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
|
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
|
||||||
from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo, create_container
|
from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo, create_container
|
||||||
from frostfs_testlib.steps.cli.object import get_object, get_object_nodes, put_object
|
from frostfs_testlib.steps.cli.object import get_object, get_object_nodes, put_object
|
||||||
from frostfs_testlib.steps.node_management import check_node_in_map, check_node_not_in_map
|
from frostfs_testlib.steps.node_management import check_node_in_map, check_node_not_in_map
|
||||||
from frostfs_testlib.storage.cluster import ClusterNode, StorageNode
|
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode
|
||||||
from frostfs_testlib.storage.controllers import ClusterStateController
|
from frostfs_testlib.storage.controllers import ClusterStateController
|
||||||
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
|
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
|
||||||
|
@ -21,6 +21,9 @@ from frostfs_testlib.testing.test_control import wait_for_success
|
||||||
from frostfs_testlib.utils.file_utils import get_file_hash
|
from frostfs_testlib.utils.file_utils import get_file_hash
|
||||||
from pytest import FixtureRequest
|
from pytest import FixtureRequest
|
||||||
|
|
||||||
|
from ...helpers.container_creation import create_container_with_ape
|
||||||
|
from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
|
|
||||||
|
@ -41,18 +44,21 @@ class TestFailoverServer(ClusterTestBase):
|
||||||
self,
|
self,
|
||||||
request: FixtureRequest,
|
request: FixtureRequest,
|
||||||
default_wallet: WalletInfo,
|
default_wallet: WalletInfo,
|
||||||
|
frostfs_cli: FrostfsCli,
|
||||||
|
cluster: Cluster,
|
||||||
) -> list[StorageContainer]:
|
) -> list[StorageContainer]:
|
||||||
|
|
||||||
placement_rule = "REP 2 CBF 2 SELECT 2 FROM *"
|
placement_rule = "REP 2 CBF 2 SELECT 2 FROM *"
|
||||||
|
container_request = ContainerRequest(placement_rule, APE_EVERYONE_ALLOW_ALL)
|
||||||
containers_count = request.param
|
containers_count = request.param
|
||||||
results = parallel(
|
results = parallel(
|
||||||
[create_container for _ in range(containers_count)],
|
[create_container_with_ape for _ in range(containers_count)],
|
||||||
|
container_request=container_request,
|
||||||
|
frostfs_cli=frostfs_cli,
|
||||||
wallet=default_wallet,
|
wallet=default_wallet,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
|
cluster=cluster,
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
endpoint=self.cluster.default_rpc_endpoint,
|
||||||
rule=placement_rule,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
containers = [
|
containers = [
|
||||||
|
@ -63,17 +69,18 @@ class TestFailoverServer(ClusterTestBase):
|
||||||
|
|
||||||
@allure.title("[Test] Create container")
|
@allure.title("[Test] Create container")
|
||||||
@pytest.fixture()
|
@pytest.fixture()
|
||||||
def container(self, default_wallet: WalletInfo) -> StorageContainer:
|
def container(self, default_wallet: WalletInfo, frostfs_cli: FrostfsCli) -> StorageContainer:
|
||||||
select = len(self.cluster.cluster_nodes)
|
select = len(self.cluster.cluster_nodes)
|
||||||
placement_rule = f"REP {select - 1} CBF 1 SELECT {select} FROM *"
|
placement_rule = f"REP {select - 1} CBF 1 SELECT {select} FROM *"
|
||||||
cont_id = create_container(
|
cid = create_container_with_ape(
|
||||||
|
ContainerRequest(placement_rule, APE_EVERYONE_ALLOW_ALL),
|
||||||
|
frostfs_cli,
|
||||||
default_wallet,
|
default_wallet,
|
||||||
shell=self.shell,
|
self.shell,
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
self.cluster,
|
||||||
rule=placement_rule,
|
self.cluster.default_rpc_endpoint,
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
)
|
||||||
storage_cont_info = StorageContainerInfo(cont_id, default_wallet)
|
storage_cont_info = StorageContainerInfo(cid, default_wallet)
|
||||||
return StorageContainer(storage_cont_info, self.shell, self.cluster)
|
return StorageContainer(storage_cont_info, self.shell, self.cluster)
|
||||||
|
|
||||||
@allure.title("[Class] Create objects")
|
@allure.title("[Class] Create objects")
|
||||||
|
@ -92,14 +99,25 @@ class TestFailoverServer(ClusterTestBase):
|
||||||
|
|
||||||
sizes_weights = [2, 1]
|
sizes_weights = [2, 1]
|
||||||
sizes = sizes_samples + random.choices(sizes_samples, weights=sizes_weights, k=object_count - samples_count)
|
sizes = sizes_samples + random.choices(sizes_samples, weights=sizes_weights, k=object_count - samples_count)
|
||||||
|
total_objects = len(containers) * object_count
|
||||||
|
|
||||||
results = parallel(
|
with reporter.step(f"Upload {total_objects} in total to containers"):
|
||||||
[container.generate_object for _ in sizes for container in containers],
|
results = parallel(
|
||||||
size=itertools.cycle([size.value for size in sizes]),
|
[self._generate_files_and_remove_physical_copy for _ in range(total_objects)],
|
||||||
)
|
container=itertools.cycle(containers),
|
||||||
|
size=itertools.cycle(sizes),
|
||||||
|
)
|
||||||
|
|
||||||
return [result.result() for result in results]
|
return [result.result() for result in results]
|
||||||
|
|
||||||
|
def _generate_files_and_remove_physical_copy(self, container: StorageContainer, size: ObjectSize) -> StorageObjectInfo:
|
||||||
|
storage_object = container.generate_object(size.value)
|
||||||
|
|
||||||
|
# Deliberately remove physical copy of the file for this test since it can generate multibytes of test objects
|
||||||
|
os.remove(storage_object.file_path)
|
||||||
|
|
||||||
|
return storage_object
|
||||||
|
|
||||||
@allure.title("[Test] Create objects and get nodes with object")
|
@allure.title("[Test] Create objects and get nodes with object")
|
||||||
@pytest.fixture()
|
@pytest.fixture()
|
||||||
def object_and_nodes(self, simple_object_size: ObjectSize, container: StorageContainer) -> tuple[StorageObjectInfo, list[ClusterNode]]:
|
def object_and_nodes(self, simple_object_size: ObjectSize, container: StorageContainer) -> tuple[StorageObjectInfo, list[ClusterNode]]:
|
||||||
|
@ -127,7 +145,7 @@ class TestFailoverServer(ClusterTestBase):
|
||||||
parallel(self._verify_object, storage_objects * len(nodes), node=itertools.cycle(nodes))
|
parallel(self._verify_object, storage_objects * len(nodes), node=itertools.cycle(nodes))
|
||||||
|
|
||||||
@allure.title("Full shutdown node")
|
@allure.title("Full shutdown node")
|
||||||
@pytest.mark.parametrize("containers, storage_objects", [(5, 10)], indirect=True)
|
@pytest.mark.parametrize("containers, storage_objects", [(4, 5)], indirect=True)
|
||||||
def test_complete_node_shutdown(
|
def test_complete_node_shutdown(
|
||||||
self,
|
self,
|
||||||
storage_objects: list[StorageObjectInfo],
|
storage_objects: list[StorageObjectInfo],
|
||||||
|
@ -221,17 +239,19 @@ class TestFailoverServer(ClusterTestBase):
|
||||||
self,
|
self,
|
||||||
default_wallet: WalletInfo,
|
default_wallet: WalletInfo,
|
||||||
cluster_state_controller: ClusterStateController,
|
cluster_state_controller: ClusterStateController,
|
||||||
|
frostfs_cli: FrostfsCli,
|
||||||
simple_file: str,
|
simple_file: str,
|
||||||
):
|
):
|
||||||
with reporter.step("Create container with full network map"):
|
with reporter.step("Create container with full network map"):
|
||||||
node_count = len(self.cluster.cluster_nodes)
|
node_count = len(self.cluster.cluster_nodes)
|
||||||
placement_rule = f"REP {node_count - 2} IN X CBF 2 SELECT {node_count} FROM * AS X"
|
placement_rule = f"REP {node_count - 2} IN X CBF 2 SELECT {node_count} FROM * AS X"
|
||||||
cid = create_container(
|
cid = create_container_with_ape(
|
||||||
|
ContainerRequest(placement_rule, APE_EVERYONE_ALLOW_ALL),
|
||||||
|
frostfs_cli,
|
||||||
default_wallet,
|
default_wallet,
|
||||||
self.shell,
|
self.shell,
|
||||||
|
self.cluster,
|
||||||
self.cluster.default_rpc_endpoint,
|
self.cluster.default_rpc_endpoint,
|
||||||
rule=placement_rule,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Put object"):
|
with reporter.step("Put object"):
|
||||||
|
@ -255,10 +275,4 @@ class TestFailoverServer(ClusterTestBase):
|
||||||
get_object(default_wallet, cid, oid_2, self.shell, alive_endpoint_with_object)
|
get_object(default_wallet, cid, oid_2, self.shell, alive_endpoint_with_object)
|
||||||
|
|
||||||
with reporter.step("Create container on alive node"):
|
with reporter.step("Create container on alive node"):
|
||||||
create_container(
|
create_container(default_wallet, self.shell, alive_endpoint_with_object, placement_rule)
|
||||||
default_wallet,
|
|
||||||
self.shell,
|
|
||||||
alive_endpoint_with_object,
|
|
||||||
rule=placement_rule,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
|
||||||
|
|
|
@ -7,8 +7,8 @@ import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
|
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
|
||||||
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
|
|
||||||
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
|
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
|
||||||
|
from frostfs_testlib.s3.interfaces import BucketContainerResolver
|
||||||
from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo, create_container
|
from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo, create_container
|
||||||
from frostfs_testlib.steps.cli.object import get_object, put_object_to_random_node
|
from frostfs_testlib.steps.cli.object import get_object, put_object_to_random_node
|
||||||
from frostfs_testlib.steps.node_management import (
|
from frostfs_testlib.steps.node_management import (
|
||||||
|
@ -33,6 +33,9 @@ from frostfs_testlib.utils.failover_utils import wait_object_replication
|
||||||
from frostfs_testlib.utils.file_keeper import FileKeeper
|
from frostfs_testlib.utils.file_keeper import FileKeeper
|
||||||
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
|
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
|
||||||
|
|
||||||
|
from ...helpers.container_request import REP_2_2_2_PUBLIC, requires_container
|
||||||
|
from ...resources.common import S3_POLICY_FILE_LOCATION
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
stopped_nodes: list[StorageNode] = []
|
stopped_nodes: list[StorageNode] = []
|
||||||
|
|
||||||
|
@ -51,32 +54,26 @@ class TestFailoverStorage(ClusterTestBase):
|
||||||
@allure.title("Shutdown and start node (stop_mode={stop_mode})")
|
@allure.title("Shutdown and start node (stop_mode={stop_mode})")
|
||||||
@pytest.mark.parametrize("stop_mode", ["hard", "soft"])
|
@pytest.mark.parametrize("stop_mode", ["hard", "soft"])
|
||||||
@pytest.mark.failover_reboot
|
@pytest.mark.failover_reboot
|
||||||
|
@requires_container(REP_2_2_2_PUBLIC)
|
||||||
def test_lose_storage_node_host(
|
def test_lose_storage_node_host(
|
||||||
self,
|
self,
|
||||||
default_wallet,
|
default_wallet,
|
||||||
stop_mode: str,
|
stop_mode: str,
|
||||||
|
container: str,
|
||||||
require_multiple_hosts,
|
require_multiple_hosts,
|
||||||
simple_object_size: ObjectSize,
|
simple_object_size: ObjectSize,
|
||||||
cluster: Cluster,
|
cluster: Cluster,
|
||||||
cluster_state_controller: ClusterStateController,
|
cluster_state_controller: ClusterStateController,
|
||||||
):
|
):
|
||||||
wallet = default_wallet
|
wallet = default_wallet
|
||||||
placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
|
|
||||||
source_file_path = generate_file(simple_object_size.value)
|
source_file_path = generate_file(simple_object_size.value)
|
||||||
stopped_hosts_nodes = []
|
stopped_hosts_nodes = []
|
||||||
|
|
||||||
with reporter.step(f"Create container and put object"):
|
with reporter.step(f"Put object"):
|
||||||
cid = create_container(
|
oid = put_object_to_random_node(wallet, source_file_path, container, shell=self.shell, cluster=self.cluster)
|
||||||
wallet,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
rule=placement_rule,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
|
||||||
oid = put_object_to_random_node(wallet, source_file_path, cid, shell=self.shell, cluster=self.cluster)
|
|
||||||
|
|
||||||
with reporter.step(f"Wait for replication and get nodes with object"):
|
with reporter.step(f"Wait for replication and get nodes with object"):
|
||||||
nodes_with_object = wait_object_replication(cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes)
|
nodes_with_object = wait_object_replication(container, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes)
|
||||||
|
|
||||||
with reporter.step(f"Stop 2 nodes with object and wait replication one by one"):
|
with reporter.step(f"Stop 2 nodes with object and wait replication one by one"):
|
||||||
for storage_node in random.sample(nodes_with_object, 2):
|
for storage_node in random.sample(nodes_with_object, 2):
|
||||||
|
@ -86,7 +83,7 @@ class TestFailoverStorage(ClusterTestBase):
|
||||||
cluster_state_controller.stop_node_host(cluster_node, stop_mode)
|
cluster_state_controller.stop_node_host(cluster_node, stop_mode)
|
||||||
|
|
||||||
replicated_nodes = wait_object_replication(
|
replicated_nodes = wait_object_replication(
|
||||||
cid,
|
container,
|
||||||
oid,
|
oid,
|
||||||
2,
|
2,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
|
@ -94,22 +91,18 @@ class TestFailoverStorage(ClusterTestBase):
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Check object data is not corrupted"):
|
with reporter.step("Check object data is not corrupted"):
|
||||||
got_file_path = get_object(
|
got_file_path = get_object(wallet, container, oid, endpoint=replicated_nodes[0].get_rpc_endpoint(), shell=self.shell)
|
||||||
wallet, cid, oid, endpoint=replicated_nodes[0].get_rpc_endpoint(), shell=self.shell
|
|
||||||
)
|
|
||||||
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
|
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
|
||||||
|
|
||||||
with reporter.step("Return all hosts"):
|
with reporter.step("Return all hosts"):
|
||||||
cluster_state_controller.start_stopped_hosts()
|
cluster_state_controller.start_stopped_hosts()
|
||||||
|
|
||||||
with reporter.step("Check object data is not corrupted"):
|
with reporter.step("Check object data is not corrupted"):
|
||||||
replicated_nodes = wait_object_replication(cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes)
|
replicated_nodes = wait_object_replication(container, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes)
|
||||||
got_file_path = get_object(
|
got_file_path = get_object(wallet, container, oid, shell=self.shell, endpoint=replicated_nodes[0].get_rpc_endpoint())
|
||||||
wallet, cid, oid, shell=self.shell, endpoint=replicated_nodes[0].get_rpc_endpoint()
|
|
||||||
)
|
|
||||||
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
|
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
|
||||||
|
|
||||||
@pytest.mark.parametrize("s3_policy", ["pytest_tests/resources/files/policy.json"], indirect=True)
|
@pytest.mark.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], indirect=True)
|
||||||
@allure.title("Do not ignore unhealthy tree endpoints (s3_client={s3_client})")
|
@allure.title("Do not ignore unhealthy tree endpoints (s3_client={s3_client})")
|
||||||
def test_unhealthy_tree(
|
def test_unhealthy_tree(
|
||||||
self,
|
self,
|
||||||
|
@ -117,6 +110,7 @@ class TestFailoverStorage(ClusterTestBase):
|
||||||
default_wallet: WalletInfo,
|
default_wallet: WalletInfo,
|
||||||
simple_object_size: ObjectSize,
|
simple_object_size: ObjectSize,
|
||||||
cluster_state_controller: ClusterStateController,
|
cluster_state_controller: ClusterStateController,
|
||||||
|
bucket_container_resolver: BucketContainerResolver,
|
||||||
):
|
):
|
||||||
default_node = self.cluster.cluster_nodes[0]
|
default_node = self.cluster.cluster_nodes[0]
|
||||||
|
|
||||||
|
@ -149,6 +143,7 @@ class TestFailoverStorage(ClusterTestBase):
|
||||||
wallet=default_wallet,
|
wallet=default_wallet,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
endpoint=self.cluster.storage_nodes[0].get_rpc_endpoint(),
|
endpoint=self.cluster.storage_nodes[0].get_rpc_endpoint(),
|
||||||
|
bucket_container_resolver=bucket_container_resolver,
|
||||||
)[0]
|
)[0]
|
||||||
|
|
||||||
with reporter.step("Turn off all storage nodes except bucket node"):
|
with reporter.step("Turn off all storage nodes except bucket node"):
|
||||||
|
@ -279,9 +274,7 @@ class TestEmptyMap(ClusterTestBase):
|
||||||
cluster_state_controller.stop_services_of_type(StorageNode)
|
cluster_state_controller.stop_services_of_type(StorageNode)
|
||||||
|
|
||||||
with reporter.step("Remove all nodes from network map"):
|
with reporter.step("Remove all nodes from network map"):
|
||||||
remove_nodes_from_map_morph(
|
remove_nodes_from_map_morph(shell=self.shell, cluster=self.cluster, remove_nodes=self.cluster.services(StorageNode))
|
||||||
shell=self.shell, cluster=self.cluster, remove_nodes=self.cluster.services(StorageNode)
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step("Return all storage nodes to network map"):
|
with reporter.step("Return all storage nodes to network map"):
|
||||||
self.return_nodes_after_stop_with_check_empty_map(cluster_state_controller)
|
self.return_nodes_after_stop_with_check_empty_map(cluster_state_controller)
|
||||||
|
@ -462,9 +455,7 @@ class TestStorageDataLoss(ClusterTestBase):
|
||||||
s3_client.put_object(bucket, complex_object_path)
|
s3_client.put_object(bucket, complex_object_path)
|
||||||
|
|
||||||
with reporter.step("Check objects are in bucket"):
|
with reporter.step("Check objects are in bucket"):
|
||||||
s3_helper.check_objects_in_bucket(
|
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[simple_object_key, complex_object_key])
|
||||||
s3_client, bucket, expected_objects=[simple_object_key, complex_object_key]
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step("Stop storage services on all nodes"):
|
with reporter.step("Stop storage services on all nodes"):
|
||||||
cluster_state_controller.stop_services_of_type(StorageNode)
|
cluster_state_controller.stop_services_of_type(StorageNode)
|
||||||
|
@ -578,17 +569,13 @@ class TestStorageDataLoss(ClusterTestBase):
|
||||||
exception_messages.append(f"Shard {shard} changed status to {status}")
|
exception_messages.append(f"Shard {shard} changed status to {status}")
|
||||||
|
|
||||||
with reporter.step("No related errors should be in log"):
|
with reporter.step("No related errors should be in log"):
|
||||||
if node_under_test.host.is_message_in_logs(
|
if node_under_test.host.is_message_in_logs(message_regex=r"\Wno such file or directory\W", since=test_start_time):
|
||||||
message_regex=r"\Wno such file or directory\W", since=test_start_time
|
|
||||||
):
|
|
||||||
exception_messages.append(f"Node {node_under_test} have shard errors in logs")
|
exception_messages.append(f"Node {node_under_test} have shard errors in logs")
|
||||||
|
|
||||||
with reporter.step("Pass test if no errors found"):
|
with reporter.step("Pass test if no errors found"):
|
||||||
assert not exception_messages, "\n".join(exception_messages)
|
assert not exception_messages, "\n".join(exception_messages)
|
||||||
|
|
||||||
@allure.title(
|
@allure.title("Loss of one node should trigger use of tree and storage service in another node (s3_client={s3_client})")
|
||||||
"Loss of one node should trigger use of tree and storage service in another node (s3_client={s3_client})"
|
|
||||||
)
|
|
||||||
def test_s3_one_endpoint_loss(
|
def test_s3_one_endpoint_loss(
|
||||||
self,
|
self,
|
||||||
bucket,
|
bucket,
|
||||||
|
@ -610,7 +597,7 @@ class TestStorageDataLoss(ClusterTestBase):
|
||||||
put_object = s3_client.put_object(bucket, file_path)
|
put_object = s3_client.put_object(bucket, file_path)
|
||||||
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
|
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
|
||||||
|
|
||||||
@pytest.mark.parametrize("s3_policy", ["pytest_tests/resources/files/policy.json"], indirect=True)
|
@pytest.mark.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], indirect=True)
|
||||||
@allure.title("After Pilorama.db loss on one node object is retrievable (s3_client={s3_client})")
|
@allure.title("After Pilorama.db loss on one node object is retrievable (s3_client={s3_client})")
|
||||||
def test_s3_one_pilorama_loss(
|
def test_s3_one_pilorama_loss(
|
||||||
self,
|
self,
|
||||||
|
|
|
@ -6,10 +6,8 @@ import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.healthcheck.interfaces import Healthcheck
|
from frostfs_testlib.healthcheck.interfaces import Healthcheck
|
||||||
from frostfs_testlib.resources.wellknown_acl import EACL_PUBLIC_READ_WRITE, PUBLIC_ACL
|
|
||||||
from frostfs_testlib.steps.cli.container import create_container
|
from frostfs_testlib.steps.cli.container import create_container
|
||||||
from frostfs_testlib.steps.cli.object import get_object, get_object_nodes, neo_go_query_height, put_object, put_object_to_random_node
|
from frostfs_testlib.steps.cli.object import get_object, get_object_nodes, neo_go_query_height, put_object, put_object_to_random_node
|
||||||
from frostfs_testlib.steps.storage_object import delete_objects
|
|
||||||
from frostfs_testlib.storage.cluster import ClusterNode
|
from frostfs_testlib.storage.cluster import ClusterNode
|
||||||
from frostfs_testlib.storage.controllers import ClusterStateController
|
from frostfs_testlib.storage.controllers import ClusterStateController
|
||||||
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
|
@ -20,6 +18,8 @@ from frostfs_testlib.testing.parallel import parallel
|
||||||
from frostfs_testlib.utils.failover_utils import wait_object_replication
|
from frostfs_testlib.utils.failover_utils import wait_object_replication
|
||||||
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
|
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
|
||||||
|
|
||||||
|
from ...helpers.container_request import PUBLIC_WITH_POLICY, REP_2_2_2_PUBLIC, requires_container
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
STORAGE_NODE_COMMUNICATION_PORT = "8080"
|
STORAGE_NODE_COMMUNICATION_PORT = "8080"
|
||||||
STORAGE_NODE_COMMUNICATION_PORT_TLS = "8082"
|
STORAGE_NODE_COMMUNICATION_PORT_TLS = "8082"
|
||||||
|
@ -59,23 +59,13 @@ class TestFailoverNetwork(ClusterTestBase):
|
||||||
def storage_objects(
|
def storage_objects(
|
||||||
self,
|
self,
|
||||||
simple_object_size: ObjectSize,
|
simple_object_size: ObjectSize,
|
||||||
|
container: str,
|
||||||
default_wallet: WalletInfo,
|
default_wallet: WalletInfo,
|
||||||
) -> list[StorageObjectInfo]:
|
) -> list[StorageObjectInfo]:
|
||||||
|
|
||||||
file_path = generate_file(simple_object_size.value)
|
file_path = generate_file(simple_object_size.value)
|
||||||
file_hash = get_file_hash(file_path)
|
file_hash = get_file_hash(file_path)
|
||||||
|
|
||||||
with reporter.step("Create container"):
|
|
||||||
placement_rule = "REP 1 CBF 1"
|
|
||||||
cid = create_container(
|
|
||||||
wallet=default_wallet,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
rule=placement_rule,
|
|
||||||
await_mode=True,
|
|
||||||
basic_acl=EACL_PUBLIC_READ_WRITE,
|
|
||||||
)
|
|
||||||
|
|
||||||
storage_objects = []
|
storage_objects = []
|
||||||
|
|
||||||
with reporter.step("Put object"):
|
with reporter.step("Put object"):
|
||||||
|
@ -83,12 +73,12 @@ class TestFailoverNetwork(ClusterTestBase):
|
||||||
oid = put_object_to_random_node(
|
oid = put_object_to_random_node(
|
||||||
wallet=default_wallet,
|
wallet=default_wallet,
|
||||||
path=file_path,
|
path=file_path,
|
||||||
cid=cid,
|
cid=container,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
cluster=self.cluster,
|
cluster=self.cluster,
|
||||||
)
|
)
|
||||||
|
|
||||||
storage_object = StorageObjectInfo(cid=cid, oid=oid)
|
storage_object = StorageObjectInfo(cid=container, oid=oid)
|
||||||
storage_object.size = simple_object_size.value
|
storage_object.size = simple_object_size.value
|
||||||
storage_object.wallet = default_wallet
|
storage_object.wallet = default_wallet
|
||||||
storage_object.file_path = file_path
|
storage_object.file_path = file_path
|
||||||
|
@ -100,9 +90,11 @@ class TestFailoverNetwork(ClusterTestBase):
|
||||||
return storage_objects
|
return storage_objects
|
||||||
|
|
||||||
@allure.title("Block Storage node traffic")
|
@allure.title("Block Storage node traffic")
|
||||||
|
@requires_container(REP_2_2_2_PUBLIC)
|
||||||
def test_block_storage_node_traffic(
|
def test_block_storage_node_traffic(
|
||||||
self,
|
self,
|
||||||
default_wallet: WalletInfo,
|
default_wallet: WalletInfo,
|
||||||
|
container: str,
|
||||||
require_multiple_hosts,
|
require_multiple_hosts,
|
||||||
simple_object_size: ObjectSize,
|
simple_object_size: ObjectSize,
|
||||||
cluster_state_controller: ClusterStateController,
|
cluster_state_controller: ClusterStateController,
|
||||||
|
@ -111,21 +103,13 @@ class TestFailoverNetwork(ClusterTestBase):
|
||||||
Block storage nodes traffic using iptables and wait for replication for objects.
|
Block storage nodes traffic using iptables and wait for replication for objects.
|
||||||
"""
|
"""
|
||||||
wallet = default_wallet
|
wallet = default_wallet
|
||||||
placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
|
|
||||||
wakeup_node_timeout = 10 # timeout to let nodes detect that traffic has blocked
|
wakeup_node_timeout = 10 # timeout to let nodes detect that traffic has blocked
|
||||||
nodes_to_block_count = 2
|
nodes_to_block_count = 2
|
||||||
|
|
||||||
source_file_path = generate_file(simple_object_size.value)
|
source_file_path = generate_file(simple_object_size.value)
|
||||||
cid = create_container(
|
oid = put_object_to_random_node(wallet, source_file_path, container, self.shell, self.cluster)
|
||||||
wallet,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
rule=placement_rule,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
|
||||||
oid = put_object_to_random_node(wallet, source_file_path, cid, shell=self.shell, cluster=self.cluster)
|
|
||||||
|
|
||||||
nodes = wait_object_replication(cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes)
|
nodes = wait_object_replication(container, oid, 2, self.shell, self.cluster.storage_nodes)
|
||||||
|
|
||||||
logger.info(f"Nodes are {nodes}")
|
logger.info(f"Nodes are {nodes}")
|
||||||
nodes_to_block = nodes
|
nodes_to_block = nodes
|
||||||
|
@ -147,7 +131,7 @@ class TestFailoverNetwork(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step(f"Check object is not stored on node {node}"):
|
with reporter.step(f"Check object is not stored on node {node}"):
|
||||||
new_nodes = wait_object_replication(
|
new_nodes = wait_object_replication(
|
||||||
cid,
|
container,
|
||||||
oid,
|
oid,
|
||||||
2,
|
2,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
|
@ -156,7 +140,7 @@ class TestFailoverNetwork(ClusterTestBase):
|
||||||
assert node.storage_node not in new_nodes
|
assert node.storage_node not in new_nodes
|
||||||
|
|
||||||
with reporter.step("Check object data is not corrupted"):
|
with reporter.step("Check object data is not corrupted"):
|
||||||
got_file_path = get_object(wallet, cid, oid, endpoint=new_nodes[0].get_rpc_endpoint(), shell=self.shell)
|
got_file_path = get_object(wallet, container, oid, endpoint=new_nodes[0].get_rpc_endpoint(), shell=self.shell)
|
||||||
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
|
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
|
||||||
|
|
||||||
with reporter.step(f"Unblock incoming traffic"):
|
with reporter.step(f"Unblock incoming traffic"):
|
||||||
|
@ -170,13 +154,14 @@ class TestFailoverNetwork(ClusterTestBase):
|
||||||
sleep(wakeup_node_timeout)
|
sleep(wakeup_node_timeout)
|
||||||
|
|
||||||
with reporter.step("Check object data is not corrupted"):
|
with reporter.step("Check object data is not corrupted"):
|
||||||
new_nodes = wait_object_replication(cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes)
|
new_nodes = wait_object_replication(container, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes)
|
||||||
|
|
||||||
got_file_path = get_object(wallet, cid, oid, shell=self.shell, endpoint=new_nodes[0].get_rpc_endpoint())
|
got_file_path = get_object(wallet, container, oid, shell=self.shell, endpoint=new_nodes[0].get_rpc_endpoint())
|
||||||
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
|
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
|
||||||
|
|
||||||
@pytest.mark.interfaces
|
@pytest.mark.interfaces
|
||||||
@allure.title("Block DATA interface node")
|
@allure.title("Block DATA interface node")
|
||||||
|
@requires_container(PUBLIC_WITH_POLICY("REP 1 CBF 1", short_name="REP 1 CBF 1"))
|
||||||
def test_block_data_interface(
|
def test_block_data_interface(
|
||||||
self,
|
self,
|
||||||
cluster_state_controller: ClusterStateController,
|
cluster_state_controller: ClusterStateController,
|
||||||
|
@ -284,7 +269,7 @@ class TestFailoverNetwork(ClusterTestBase):
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step(f"Get object nodes with object, expect true"):
|
with reporter.step(f"Get object nodes with object, expect true"):
|
||||||
input_file = get_object(
|
_ = get_object(
|
||||||
wallet=default_wallet,
|
wallet=default_wallet,
|
||||||
cid=storage_object.cid,
|
cid=storage_object.cid,
|
||||||
oid=storage_object.oid,
|
oid=storage_object.oid,
|
||||||
|
|
0
pytest_tests/testsuites/management/__init__.py
Normal file
0
pytest_tests/testsuites/management/__init__.py
Normal file
|
@ -1,7 +1,7 @@
|
||||||
import logging
|
import logging
|
||||||
import random
|
import random
|
||||||
from time import sleep
|
from time import sleep
|
||||||
from typing import Callable, Optional, Tuple
|
from typing import Callable, Optional
|
||||||
|
|
||||||
import allure
|
import allure
|
||||||
import pytest
|
import pytest
|
||||||
|
@ -10,7 +10,6 @@ from frostfs_testlib.cli import FrostfsCli
|
||||||
from frostfs_testlib.cli.netmap_parser import NetmapParser
|
from frostfs_testlib.cli.netmap_parser import NetmapParser
|
||||||
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
|
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
|
||||||
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
|
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
|
||||||
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
|
|
||||||
from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container
|
from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container
|
||||||
from frostfs_testlib.steps.cli.object import (
|
from frostfs_testlib.steps.cli.object import (
|
||||||
delete_object,
|
delete_object,
|
||||||
|
@ -44,7 +43,9 @@ from frostfs_testlib.utils import string_utils
|
||||||
from frostfs_testlib.utils.failover_utils import wait_object_replication
|
from frostfs_testlib.utils.failover_utils import wait_object_replication
|
||||||
from frostfs_testlib.utils.file_utils import generate_file
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
|
from ...helpers.container_creation import create_container_with_ape
|
||||||
|
from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, REP_1_1_1_PUBLIC, ContainerRequest, requires_container
|
||||||
|
from ...helpers.utility import wait_for_gc_pass_on_storage_nodes
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
check_nodes: list[StorageNode] = []
|
check_nodes: list[StorageNode] = []
|
||||||
|
@ -55,26 +56,16 @@ check_nodes: list[StorageNode] = []
|
||||||
@pytest.mark.order(10)
|
@pytest.mark.order(10)
|
||||||
class TestNodeManagement(ClusterTestBase):
|
class TestNodeManagement(ClusterTestBase):
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
@allure.title("Create container and pick the node with data")
|
@allure.title("Pick the node with data")
|
||||||
def create_container_and_pick_node(self, default_wallet: WalletInfo, simple_object_size: ObjectSize) -> Tuple[str, StorageNode]:
|
def node_with_data(self, container: str, default_wallet: WalletInfo, simple_object_size: ObjectSize) -> StorageNode:
|
||||||
file_path = generate_file(simple_object_size.value)
|
file_path = generate_file(simple_object_size.value)
|
||||||
placement_rule = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
|
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
|
||||||
endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
cid = create_container(
|
nodes = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
|
||||||
default_wallet,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=endpoint,
|
|
||||||
rule=placement_rule,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
|
||||||
oid = put_object_to_random_node(default_wallet, file_path, cid, self.shell, self.cluster)
|
|
||||||
|
|
||||||
nodes = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
|
|
||||||
assert len(nodes) == 1
|
assert len(nodes) == 1
|
||||||
node = nodes[0]
|
node = nodes[0]
|
||||||
|
|
||||||
yield cid, node
|
yield node
|
||||||
|
|
||||||
shards = node_shard_list(node)
|
shards = node_shard_list(node)
|
||||||
assert shards
|
assert shards
|
||||||
|
@ -126,6 +117,7 @@ class TestNodeManagement(ClusterTestBase):
|
||||||
self,
|
self,
|
||||||
default_wallet: WalletInfo,
|
default_wallet: WalletInfo,
|
||||||
simple_object_size: ObjectSize,
|
simple_object_size: ObjectSize,
|
||||||
|
frostfs_cli: FrostfsCli,
|
||||||
return_nodes_after_test_run,
|
return_nodes_after_test_run,
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
|
@ -147,20 +139,16 @@ class TestNodeManagement(ClusterTestBase):
|
||||||
exclude_node_from_network_map(random_node, alive_node, shell=self.shell, cluster=self.cluster)
|
exclude_node_from_network_map(random_node, alive_node, shell=self.shell, cluster=self.cluster)
|
||||||
delete_node_data(random_node)
|
delete_node_data(random_node)
|
||||||
|
|
||||||
cid = create_container(
|
cid = create_container_with_ape(
|
||||||
wallet,
|
ContainerRequest(placement_rule_3, APE_EVERYONE_ALLOW_ALL),
|
||||||
rule=placement_rule_3,
|
frostfs_cli,
|
||||||
basic_acl=PUBLIC_ACL,
|
default_wallet,
|
||||||
shell=self.shell,
|
self.shell,
|
||||||
endpoint=alive_node.get_rpc_endpoint(),
|
self.cluster,
|
||||||
)
|
alive_node.get_rpc_endpoint(),
|
||||||
oid = put_object(
|
|
||||||
wallet,
|
|
||||||
source_file_path,
|
|
||||||
cid,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=alive_node.get_rpc_endpoint(),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
oid = put_object(wallet, source_file_path, cid, self.shell, alive_node.get_rpc_endpoint())
|
||||||
wait_object_replication(cid, oid, 3, shell=self.shell, nodes=storage_nodes)
|
wait_object_replication(cid, oid, 3, shell=self.shell, nodes=storage_nodes)
|
||||||
|
|
||||||
self.return_nodes(alive_node)
|
self.return_nodes(alive_node)
|
||||||
|
@ -182,12 +170,13 @@ class TestNodeManagement(ClusterTestBase):
|
||||||
wait_object_replication(cid, oid, 3, shell=self.shell, nodes=storage_nodes)
|
wait_object_replication(cid, oid, 3, shell=self.shell, nodes=storage_nodes)
|
||||||
|
|
||||||
with reporter.step("Check container could be created with new node"):
|
with reporter.step("Check container could be created with new node"):
|
||||||
cid = create_container(
|
cid = create_container_with_ape(
|
||||||
wallet,
|
ContainerRequest(placement_rule_4, APE_EVERYONE_ALLOW_ALL),
|
||||||
rule=placement_rule_4,
|
frostfs_cli,
|
||||||
basic_acl=PUBLIC_ACL,
|
default_wallet,
|
||||||
shell=self.shell,
|
self.shell,
|
||||||
endpoint=alive_node.get_rpc_endpoint(),
|
self.cluster,
|
||||||
|
alive_node.get_rpc_endpoint(),
|
||||||
)
|
)
|
||||||
oid = put_object(
|
oid = put_object(
|
||||||
wallet,
|
wallet,
|
||||||
|
@ -231,48 +220,53 @@ class TestNodeManagement(ClusterTestBase):
|
||||||
|
|
||||||
@pytest.mark.skip(reason="Need to clarify scenario")
|
@pytest.mark.skip(reason="Need to clarify scenario")
|
||||||
@allure.title("Control Operations with storage nodes")
|
@allure.title("Control Operations with storage nodes")
|
||||||
|
@requires_container(REP_1_1_1_PUBLIC)
|
||||||
def test_shards(
|
def test_shards(
|
||||||
self,
|
self,
|
||||||
default_wallet,
|
default_wallet: WalletInfo,
|
||||||
create_container_and_pick_node,
|
container: str,
|
||||||
|
node_with_data: StorageNode,
|
||||||
simple_object_size: ObjectSize,
|
simple_object_size: ObjectSize,
|
||||||
):
|
):
|
||||||
wallet = default_wallet
|
wallet = default_wallet
|
||||||
file_path = generate_file(simple_object_size.value)
|
file_path = generate_file(simple_object_size.value)
|
||||||
|
|
||||||
cid, node = create_container_and_pick_node
|
original_oid = put_object_to_random_node(wallet, file_path, container, self.shell, self.cluster)
|
||||||
original_oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
|
|
||||||
|
|
||||||
# for mode in ('read-only', 'degraded'):
|
# for mode in ('read-only', 'degraded'):
|
||||||
for mode in ("degraded",):
|
for mode in ("degraded",):
|
||||||
shards = node_shard_list(node)
|
shards = node_shard_list(node_with_data)
|
||||||
assert shards
|
assert shards
|
||||||
|
|
||||||
for shard in shards:
|
for shard in shards:
|
||||||
node_shard_set_mode(node, shard, mode)
|
node_shard_set_mode(node_with_data, shard, mode)
|
||||||
|
|
||||||
shards = node_shard_list(node)
|
shards = node_shard_list(node_with_data)
|
||||||
assert shards
|
assert shards
|
||||||
|
|
||||||
|
# TODO: Add match for error
|
||||||
with pytest.raises(RuntimeError):
|
with pytest.raises(RuntimeError):
|
||||||
put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
|
put_object_to_random_node(wallet, file_path, container, self.shell, self.cluster)
|
||||||
|
|
||||||
|
# TODO: Add match for error
|
||||||
with pytest.raises(RuntimeError):
|
with pytest.raises(RuntimeError):
|
||||||
delete_object(wallet, cid, original_oid, self.shell, self.cluster.default_rpc_endpoint)
|
delete_object(wallet, container, original_oid, self.shell, self.cluster.default_rpc_endpoint)
|
||||||
|
|
||||||
get_object_from_random_node(wallet, cid, original_oid, self.shell, self.cluster)
|
get_object_from_random_node(wallet, container, original_oid, self.shell, self.cluster)
|
||||||
|
|
||||||
for shard in shards:
|
for shard in shards:
|
||||||
node_shard_set_mode(node, shard, "read-write")
|
node_shard_set_mode(node_with_data, shard, "read-write")
|
||||||
|
|
||||||
shards = node_shard_list(node)
|
shards = node_shard_list(node_with_data)
|
||||||
assert shards
|
assert shards
|
||||||
|
|
||||||
oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
|
oid = put_object_to_random_node(wallet, file_path, container, self.shell, self.cluster)
|
||||||
delete_object(wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
|
delete_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
|
||||||
|
|
||||||
@allure.title("Put object with stopped node")
|
@allure.title("Put object with stopped node")
|
||||||
def test_stop_node(self, default_wallet, return_nodes_after_test_run, simple_object_size: ObjectSize):
|
def test_stop_node(
|
||||||
|
self, default_wallet: WalletInfo, frostfs_cli: FrostfsCli, return_nodes_after_test_run, simple_object_size: ObjectSize
|
||||||
|
):
|
||||||
wallet = default_wallet
|
wallet = default_wallet
|
||||||
placement_rule = "REP 3 IN X SELECT 4 FROM * AS X"
|
placement_rule = "REP 3 IN X SELECT 4 FROM * AS X"
|
||||||
source_file_path = generate_file(simple_object_size.value)
|
source_file_path = generate_file(simple_object_size.value)
|
||||||
|
@ -280,16 +274,20 @@ class TestNodeManagement(ClusterTestBase):
|
||||||
random_node = random.choice(storage_nodes[1:])
|
random_node = random.choice(storage_nodes[1:])
|
||||||
alive_node = random.choice([storage_node for storage_node in storage_nodes if storage_node.id != random_node.id])
|
alive_node = random.choice([storage_node for storage_node in storage_nodes if storage_node.id != random_node.id])
|
||||||
|
|
||||||
cid = create_container(
|
with reporter.step("Create container from random node endpoint"):
|
||||||
wallet,
|
cid = create_container_with_ape(
|
||||||
rule=placement_rule,
|
ContainerRequest(placement_rule, APE_EVERYONE_ALLOW_ALL),
|
||||||
basic_acl=PUBLIC_ACL,
|
frostfs_cli,
|
||||||
shell=self.shell,
|
default_wallet,
|
||||||
endpoint=random_node.get_rpc_endpoint(),
|
self.shell,
|
||||||
)
|
self.cluster,
|
||||||
|
random_node.get_rpc_endpoint(),
|
||||||
|
)
|
||||||
|
|
||||||
with reporter.step("Stop the random node"):
|
with reporter.step("Stop the random node"):
|
||||||
check_nodes.append(random_node)
|
check_nodes.append(random_node)
|
||||||
random_node.stop_service()
|
random_node.stop_service()
|
||||||
|
|
||||||
with reporter.step("Try to put an object and expect success"):
|
with reporter.step("Try to put an object and expect success"):
|
||||||
put_object(
|
put_object(
|
||||||
wallet,
|
wallet,
|
||||||
|
@ -364,7 +362,7 @@ class TestMaintenanceMode(ClusterTestBase):
|
||||||
cluster_state_controller: ClusterStateController,
|
cluster_state_controller: ClusterStateController,
|
||||||
restore_node_status: list[ClusterNode],
|
restore_node_status: list[ClusterNode],
|
||||||
):
|
):
|
||||||
with reporter.step("Create container and create\put object"):
|
with reporter.step("Create container and put object"):
|
||||||
cid = create_container(
|
cid = create_container(
|
||||||
wallet=default_wallet,
|
wallet=default_wallet,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
|
|
0
pytest_tests/testsuites/metrics/__init__.py
Normal file
0
pytest_tests/testsuites/metrics/__init__.py
Normal file
|
@ -1,122 +1,216 @@
|
||||||
import math
|
import math
|
||||||
import time
|
|
||||||
|
|
||||||
import allure
|
import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container
|
from frostfs_testlib.steps.cli.container import delete_container, search_nodes_with_container, wait_for_container_deletion
|
||||||
from frostfs_testlib.steps.cli.object import delete_object, head_object, put_object_to_random_node
|
from frostfs_testlib.steps.cli.object import delete_object, head_object, put_object_to_random_node
|
||||||
from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
|
from frostfs_testlib.steps.metrics import calc_metrics_count_from_stdout, check_metrics_counter, get_metrics_value
|
||||||
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
|
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
|
||||||
from frostfs_testlib.storage.cluster import Cluster
|
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
||||||
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.utils.file_utils import generate_file
|
from frostfs_testlib.testing.parallel import parallel
|
||||||
|
from frostfs_testlib.utils.file_utils import TestFile, generate_file
|
||||||
|
|
||||||
|
from ...helpers.container_request import PUBLIC_WITH_POLICY, ContainerRequest, requires_container
|
||||||
|
from ...helpers.utility import are_numbers_similar
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.container
|
@pytest.mark.nightly
|
||||||
|
@pytest.mark.metrics
|
||||||
class TestContainerMetrics(ClusterTestBase):
|
class TestContainerMetrics(ClusterTestBase):
|
||||||
@allure.title("Container metrics (obj_size={object_size},policy={policy})")
|
@reporter.step("Put object to container: {cid}")
|
||||||
@pytest.mark.parametrize("placement_policy, policy", [("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", "REP"), ("EC 1.1 CBF 1", "EC")])
|
def put_object_parallel(self, file_path: str, wallet: WalletInfo, cid: str):
|
||||||
|
oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
|
||||||
|
return oid
|
||||||
|
|
||||||
|
@reporter.step("Get metrics value from node")
|
||||||
|
def get_metrics_search_by_greps_parallel(self, node: ClusterNode, **greps):
|
||||||
|
try:
|
||||||
|
content_stdout = node.metrics.storage.get_metrics_search_by_greps(greps)
|
||||||
|
return calc_metrics_count_from_stdout(content_stdout)
|
||||||
|
except Exception as e:
|
||||||
|
return None
|
||||||
|
|
||||||
|
@allure.title("Container metrics (obj_size={object_size}, policy={container_request})")
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"container_request, copies",
|
||||||
|
[
|
||||||
|
(PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP"), 2),
|
||||||
|
(PUBLIC_WITH_POLICY("EC 1.1 CBF 1", short_name="EC"), 1),
|
||||||
|
],
|
||||||
|
indirect=["container_request"],
|
||||||
|
)
|
||||||
def test_container_metrics(
|
def test_container_metrics(
|
||||||
self,
|
self,
|
||||||
object_size: ObjectSize,
|
object_size: ObjectSize,
|
||||||
max_object_size: int,
|
max_object_size: int,
|
||||||
default_wallet: WalletInfo,
|
default_wallet: WalletInfo,
|
||||||
cluster: Cluster,
|
cluster: Cluster,
|
||||||
placement_policy: str,
|
copies: int,
|
||||||
policy: str,
|
container: str,
|
||||||
|
test_file: TestFile,
|
||||||
|
container_request: ContainerRequest,
|
||||||
):
|
):
|
||||||
file_path = generate_file(object_size.value)
|
|
||||||
copies = 2 if policy == "REP" else 1
|
|
||||||
object_chunks = 1
|
object_chunks = 1
|
||||||
link_object = 0
|
link_object = 0
|
||||||
|
|
||||||
with reporter.step(f"Create container with policy {placement_policy}"):
|
|
||||||
cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)
|
|
||||||
|
|
||||||
if object_size.value > max_object_size:
|
if object_size.value > max_object_size:
|
||||||
object_chunks = math.ceil(object_size.value / max_object_size)
|
object_chunks = math.ceil(object_size.value / max_object_size)
|
||||||
link_object = len(search_nodes_with_container(default_wallet, cid, self.shell, cluster.default_rpc_endpoint, cluster))
|
link_object = len(search_nodes_with_container(default_wallet, container, self.shell, cluster.default_rpc_endpoint, cluster))
|
||||||
|
|
||||||
with reporter.step("Put object to random node"):
|
with reporter.step("Put object to random node"):
|
||||||
oid = put_object_to_random_node(
|
oid = put_object_to_random_node(default_wallet, test_file.path, container, self.shell, cluster)
|
||||||
wallet=default_wallet,
|
|
||||||
path=file_path,
|
|
||||||
cid=cid,
|
|
||||||
shell=self.shell,
|
|
||||||
cluster=cluster,
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step("Get object nodes"):
|
with reporter.step("Get object nodes"):
|
||||||
object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
|
object_storage_nodes = get_nodes_with_object(container, oid, self.shell, cluster.storage_nodes)
|
||||||
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
|
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
|
||||||
|
|
||||||
with reporter.step("Check metric appears in node where the object is located"):
|
with reporter.step("Check metric appears in node where the object is located"):
|
||||||
count_metrics = (object_chunks * copies) + link_object
|
count_metrics = (object_chunks * copies) + link_object
|
||||||
if policy == "EC":
|
if container_request.short_name == "EC":
|
||||||
count_metrics = (object_chunks * 2) + link_object
|
count_metrics = (object_chunks * 2) + link_object
|
||||||
check_metrics_counter(object_nodes, counter_exp=count_metrics, command="container_objects_total", cid=cid, type="phy")
|
check_metrics_counter(object_nodes, counter_exp=count_metrics, command="container_objects_total", cid=container, type="phy")
|
||||||
check_metrics_counter(object_nodes, counter_exp=count_metrics, command="container_objects_total", cid=cid, type="logic")
|
check_metrics_counter(object_nodes, counter_exp=count_metrics, command="container_objects_total", cid=container, type="logic")
|
||||||
check_metrics_counter(object_nodes, counter_exp=copies, command="container_objects_total", cid=cid, type="user")
|
check_metrics_counter(object_nodes, counter_exp=copies, command="container_objects_total", cid=container, type="user")
|
||||||
|
|
||||||
with reporter.step("Delete file, wait until gc remove object"):
|
with reporter.step("Delete file, wait until gc remove object"):
|
||||||
delete_object(default_wallet, cid, oid, self.shell, cluster.default_rpc_endpoint)
|
delete_object(default_wallet, container, oid, self.shell, cluster.default_rpc_endpoint)
|
||||||
|
|
||||||
with reporter.step(f"Check container metrics 'the counter should equal {len(object_nodes)}' in object nodes"):
|
with reporter.step(f"Check container metrics 'the counter should equal {len(object_nodes)}' in object nodes"):
|
||||||
check_metrics_counter(object_nodes, counter_exp=len(object_nodes), command="container_objects_total", cid=cid, type="phy")
|
check_metrics_counter(object_nodes, counter_exp=len(object_nodes), command="container_objects_total", cid=container, type="phy")
|
||||||
check_metrics_counter(object_nodes, counter_exp=len(object_nodes), command="container_objects_total", cid=cid, type="logic")
|
check_metrics_counter(
|
||||||
check_metrics_counter(object_nodes, counter_exp=0, command="container_objects_total", cid=cid, type="user")
|
object_nodes, counter_exp=len(object_nodes), command="container_objects_total", cid=container, type="logic"
|
||||||
|
)
|
||||||
|
check_metrics_counter(object_nodes, counter_exp=0, command="container_objects_total", cid=container, type="user")
|
||||||
|
|
||||||
with reporter.step("Check metrics(Phy, Logic, User) in each nodes"):
|
with reporter.step("Check metrics(Phy, Logic, User) in each nodes"):
|
||||||
# Phy and Logic metrics are 4, because in rule 'CBF 2 SELECT 2 FROM', cbf2*sel2=4
|
# Phy and Logic metrics are x2, because in rule 'CBF 2 SELECT 2 FROM', cbf2*sel2=4
|
||||||
expect_metrics = 4 if policy == "REP" else 2
|
expect_metrics = copies * 2
|
||||||
check_metrics_counter(cluster.cluster_nodes, counter_exp=expect_metrics, command="container_objects_total", cid=cid, type="phy")
|
|
||||||
check_metrics_counter(
|
check_metrics_counter(
|
||||||
cluster.cluster_nodes, counter_exp=expect_metrics, command="container_objects_total", cid=cid, type="logic"
|
cluster.cluster_nodes, counter_exp=expect_metrics, command="container_objects_total", cid=container, type="phy"
|
||||||
)
|
)
|
||||||
check_metrics_counter(cluster.cluster_nodes, counter_exp=0, command="container_objects_total", cid=cid, type="user")
|
check_metrics_counter(
|
||||||
|
cluster.cluster_nodes, counter_exp=expect_metrics, command="container_objects_total", cid=container, type="logic"
|
||||||
|
)
|
||||||
|
check_metrics_counter(cluster.cluster_nodes, counter_exp=0, command="container_objects_total", cid=container, type="user")
|
||||||
|
|
||||||
@allure.title("Container size metrics (obj_size={object_size},policy={policy})")
|
@allure.title("Container size metrics (obj_size={object_size}, policy={container_request})")
|
||||||
@pytest.mark.parametrize("placement_policy, policy", [("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", "REP"), ("EC 1.1 CBF 1", "EC")])
|
@requires_container(
|
||||||
|
[PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP"), PUBLIC_WITH_POLICY("EC 1.1 CBF 1", short_name="EC")]
|
||||||
|
)
|
||||||
def test_container_size_metrics(
|
def test_container_size_metrics(
|
||||||
self,
|
self,
|
||||||
object_size: ObjectSize,
|
object_size: ObjectSize,
|
||||||
default_wallet: WalletInfo,
|
default_wallet: WalletInfo,
|
||||||
placement_policy: str,
|
test_file: TestFile,
|
||||||
policy: str,
|
container: str,
|
||||||
):
|
):
|
||||||
file_path = generate_file(object_size.value)
|
|
||||||
|
|
||||||
with reporter.step(f"Create container with policy {policy}"):
|
|
||||||
cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, placement_policy)
|
|
||||||
|
|
||||||
with reporter.step("Put object to random node"):
|
with reporter.step("Put object to random node"):
|
||||||
oid = put_object_to_random_node(
|
oid = put_object_to_random_node(default_wallet, test_file.path, container, self.shell, self.cluster)
|
||||||
wallet=default_wallet,
|
|
||||||
path=file_path,
|
|
||||||
cid=cid,
|
|
||||||
shell=self.shell,
|
|
||||||
cluster=self.cluster,
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step("Get object nodes"):
|
with reporter.step("Get object nodes"):
|
||||||
object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, self.cluster.storage_nodes)
|
object_storage_nodes = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
|
||||||
object_nodes = [
|
object_nodes = [
|
||||||
cluster_node for cluster_node in self.cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes
|
cluster_node for cluster_node in self.cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes
|
||||||
]
|
]
|
||||||
|
|
||||||
with reporter.step("Check metric appears in all node where the object is located"):
|
with reporter.step("Check metric appears in all node where the object is located"):
|
||||||
act_metric = sum(
|
act_metric = sum(
|
||||||
[get_metrics_value(node, command="frostfs_node_engine_container_size_bytes", cid=cid) for node in object_nodes]
|
[get_metrics_value(node, command="frostfs_node_engine_container_size_bytes", cid=container) for node in object_nodes]
|
||||||
)
|
)
|
||||||
assert (act_metric // 2) == object_size.value
|
assert (act_metric // 2) == object_size.value
|
||||||
|
|
||||||
with reporter.step("Delete file, wait until gc remove object"):
|
with reporter.step("Delete file, wait until gc remove object"):
|
||||||
id_tombstone = delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
|
id_tombstone = delete_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
|
||||||
tombstone = head_object(default_wallet, cid, id_tombstone, self.shell, self.cluster.default_rpc_endpoint)
|
tombstone = head_object(default_wallet, container, id_tombstone, self.shell, self.cluster.default_rpc_endpoint)
|
||||||
|
|
||||||
with reporter.step(f"Check container size metrics"):
|
with reporter.step(f"Check container size metrics"):
|
||||||
act_metric = get_metrics_value(object_nodes[0], command="frostfs_node_engine_container_size_bytes", cid=cid)
|
act_metric = get_metrics_value(object_nodes[0], command="frostfs_node_engine_container_size_bytes", cid=container)
|
||||||
assert act_metric == int(tombstone["header"]["payloadLength"])
|
assert act_metric == int(tombstone["header"]["payloadLength"])
|
||||||
|
|
||||||
|
@allure.title("Container size metrics put {objects_count} objects (obj_size={object_size})")
|
||||||
|
@pytest.mark.parametrize("objects_count", [5, 10, 20])
|
||||||
|
@requires_container
|
||||||
|
def test_container_size_metrics_more_objects(
|
||||||
|
self, object_size: ObjectSize, default_wallet: WalletInfo, objects_count: int, container: str
|
||||||
|
):
|
||||||
|
with reporter.step(f"Put {objects_count} objects"):
|
||||||
|
files_path = [generate_file(object_size.value) for _ in range(objects_count)]
|
||||||
|
futures = parallel(self.put_object_parallel, files_path, wallet=default_wallet, cid=container)
|
||||||
|
oids = [future.result() for future in futures]
|
||||||
|
|
||||||
|
with reporter.step("Check metric appears in all nodes"):
|
||||||
|
metric_values = [
|
||||||
|
get_metrics_value(node, command="frostfs_node_engine_container_size_bytes", cid=container)
|
||||||
|
for node in self.cluster.cluster_nodes
|
||||||
|
]
|
||||||
|
actual_value = sum(metric_values) // 2 # for policy REP 2, value divide by 2
|
||||||
|
expected_value = object_size.value * objects_count
|
||||||
|
assert are_numbers_similar(
|
||||||
|
actual_value, expected_value, tolerance_percentage=2
|
||||||
|
), "metric container size bytes value not correct"
|
||||||
|
|
||||||
|
with reporter.step("Delete file, wait until gc remove object"):
|
||||||
|
tombstones_size = 0
|
||||||
|
for oid in oids:
|
||||||
|
tombstone_id = delete_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
|
||||||
|
tombstone = head_object(default_wallet, container, tombstone_id, self.shell, self.cluster.default_rpc_endpoint)
|
||||||
|
tombstones_size += int(tombstone["header"]["payloadLength"])
|
||||||
|
|
||||||
|
with reporter.step(f"Check container size metrics, 'should be positive in all nodes'"):
|
||||||
|
futures = parallel(
|
||||||
|
get_metrics_value, self.cluster.cluster_nodes, command="frostfs_node_engine_container_size_bytes", cid=container
|
||||||
|
)
|
||||||
|
metrics_value_nodes = [future.result() for future in futures]
|
||||||
|
for act_metric in metrics_value_nodes:
|
||||||
|
assert act_metric >= 0, "Metrics value is negative"
|
||||||
|
assert sum(metrics_value_nodes) // len(self.cluster.cluster_nodes) == tombstones_size, "tomstone size of objects not correct"
|
||||||
|
|
||||||
|
@allure.title("Container metrics (policy={container_request})")
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"container_request, copies",
|
||||||
|
[
|
||||||
|
(PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP"), 2),
|
||||||
|
(PUBLIC_WITH_POLICY("EC 1.1 CBF 1", short_name="EC"), 1),
|
||||||
|
],
|
||||||
|
indirect=["container_request"],
|
||||||
|
)
|
||||||
|
def test_container_metrics_delete_complex_objects(
|
||||||
|
self,
|
||||||
|
complex_object_size: ObjectSize,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
cluster: Cluster,
|
||||||
|
copies: int,
|
||||||
|
container: str,
|
||||||
|
container_request: ContainerRequest,
|
||||||
|
):
|
||||||
|
objects_count = 2
|
||||||
|
metric_name = "frostfs_node_engine_container_objects_total"
|
||||||
|
|
||||||
|
with reporter.step(f"Put {objects_count} objects"):
|
||||||
|
files_path = [generate_file(complex_object_size.value) for _ in range(objects_count)]
|
||||||
|
futures = parallel(self.put_object_parallel, files_path, wallet=default_wallet, cid=container)
|
||||||
|
oids = [future.result() for future in futures]
|
||||||
|
|
||||||
|
with reporter.step(f"Check metrics value in each nodes, should be {objects_count} for 'user'"):
|
||||||
|
check_metrics_counter(
|
||||||
|
cluster.cluster_nodes, counter_exp=objects_count * copies, command=metric_name, cid=container, type="user"
|
||||||
|
)
|
||||||
|
|
||||||
|
with reporter.step("Delete objects and container"):
|
||||||
|
for oid in oids:
|
||||||
|
delete_object(default_wallet, container, oid, self.shell, cluster.default_rpc_endpoint)
|
||||||
|
|
||||||
|
delete_container(default_wallet, container, self.shell, cluster.default_rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step("Tick epoch and check container was deleted"):
|
||||||
|
self.tick_epoch()
|
||||||
|
wait_for_container_deletion(default_wallet, container, shell=self.shell, endpoint=cluster.default_rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step(f"Check metrics value in each nodes, should not be show any result"):
|
||||||
|
futures = parallel(self.get_metrics_search_by_greps_parallel, cluster.cluster_nodes, command=metric_name, cid=container)
|
||||||
|
metrics_results = [future.result() for future in futures if future.result() is not None]
|
||||||
|
assert len(metrics_results) == 0, f"Metrics value is not empty in Prometheus, actual value in nodes: {metrics_results}"
|
||||||
|
|
|
@ -1,11 +1,9 @@
|
||||||
import random
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
import allure
|
import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.steps.cli.container import create_container
|
from frostfs_testlib.steps.cli.object import delete_object, put_object_to_random_node
|
||||||
from frostfs_testlib.steps.cli.object import delete_object, put_object, put_object_to_random_node
|
|
||||||
from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
|
from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
|
||||||
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
|
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
|
||||||
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
||||||
|
@ -15,7 +13,11 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.testing.test_control import wait_for_success
|
from frostfs_testlib.testing.test_control import wait_for_success
|
||||||
from frostfs_testlib.utils.file_utils import generate_file
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
|
from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
|
@pytest.mark.metrics
|
||||||
class TestGarbageCollectorMetrics(ClusterTestBase):
|
class TestGarbageCollectorMetrics(ClusterTestBase):
|
||||||
@wait_for_success(interval=10)
|
@wait_for_success(interval=10)
|
||||||
def check_metrics_in_node(self, cluster_node: ClusterNode, counter_exp: int, **metrics_greps: str):
|
def check_metrics_in_node(self, cluster_node: ClusterNode, counter_exp: int, **metrics_greps: str):
|
||||||
|
@ -33,9 +35,11 @@ class TestGarbageCollectorMetrics(ClusterTestBase):
|
||||||
return sum(map(int, result))
|
return sum(map(int, result))
|
||||||
|
|
||||||
@allure.title("Garbage collector expire_at object")
|
@allure.title("Garbage collector expire_at object")
|
||||||
def test_garbage_collector_metrics_expire_at_object(self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster):
|
@requires_container(PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP 2"))
|
||||||
|
def test_garbage_collector_metrics_expire_at_object(
|
||||||
|
self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, container: str
|
||||||
|
):
|
||||||
file_path = generate_file(simple_object_size.value)
|
file_path = generate_file(simple_object_size.value)
|
||||||
placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
|
|
||||||
metrics_step = 1
|
metrics_step = 1
|
||||||
|
|
||||||
with reporter.step("Get current garbage collector metrics for each nodes"):
|
with reporter.step("Get current garbage collector metrics for each nodes"):
|
||||||
|
@ -43,22 +47,19 @@ class TestGarbageCollectorMetrics(ClusterTestBase):
|
||||||
for node in cluster.cluster_nodes:
|
for node in cluster.cluster_nodes:
|
||||||
metrics_counter[node] = get_metrics_value(node, command="frostfs_node_garbage_collector_marked_for_removal_objects_total")
|
metrics_counter[node] = get_metrics_value(node, command="frostfs_node_garbage_collector_marked_for_removal_objects_total")
|
||||||
|
|
||||||
with reporter.step(f"Create container with policy {placement_policy}"):
|
|
||||||
cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)
|
|
||||||
|
|
||||||
with reporter.step("Put object to random node with expire_at"):
|
with reporter.step("Put object to random node with expire_at"):
|
||||||
current_epoch = self.get_epoch()
|
current_epoch = self.get_epoch()
|
||||||
oid = put_object_to_random_node(
|
oid = put_object_to_random_node(
|
||||||
default_wallet,
|
default_wallet,
|
||||||
file_path,
|
file_path,
|
||||||
cid,
|
container,
|
||||||
self.shell,
|
self.shell,
|
||||||
cluster,
|
cluster,
|
||||||
expire_at=current_epoch + 1,
|
expire_at=current_epoch + 1,
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Get object nodes"):
|
with reporter.step("Get object nodes"):
|
||||||
object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
|
object_storage_nodes = get_nodes_with_object(container, oid, self.shell, cluster.storage_nodes)
|
||||||
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
|
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
|
||||||
|
|
||||||
with reporter.step("Tick Epoch"):
|
with reporter.step("Tick Epoch"):
|
||||||
|
@ -76,9 +77,11 @@ class TestGarbageCollectorMetrics(ClusterTestBase):
|
||||||
)
|
)
|
||||||
|
|
||||||
@allure.title("Garbage collector delete object")
|
@allure.title("Garbage collector delete object")
|
||||||
def test_garbage_collector_metrics_deleted_objects(self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster):
|
@requires_container(PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP 2"))
|
||||||
|
def test_garbage_collector_metrics_deleted_objects(
|
||||||
|
self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, container: str
|
||||||
|
):
|
||||||
file_path = generate_file(simple_object_size.value)
|
file_path = generate_file(simple_object_size.value)
|
||||||
placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
|
|
||||||
metrics_step = 1
|
metrics_step = 1
|
||||||
|
|
||||||
with reporter.step("Get current garbage collector metrics for each nodes"):
|
with reporter.step("Get current garbage collector metrics for each nodes"):
|
||||||
|
@ -86,24 +89,21 @@ class TestGarbageCollectorMetrics(ClusterTestBase):
|
||||||
for node in cluster.cluster_nodes:
|
for node in cluster.cluster_nodes:
|
||||||
metrics_counter[node] = get_metrics_value(node, command="frostfs_node_garbage_collector_deleted_objects_total")
|
metrics_counter[node] = get_metrics_value(node, command="frostfs_node_garbage_collector_deleted_objects_total")
|
||||||
|
|
||||||
with reporter.step(f"Create container with policy {placement_policy}"):
|
|
||||||
cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)
|
|
||||||
|
|
||||||
with reporter.step("Put object to random node"):
|
with reporter.step("Put object to random node"):
|
||||||
oid = put_object_to_random_node(
|
oid = put_object_to_random_node(
|
||||||
default_wallet,
|
default_wallet,
|
||||||
file_path,
|
file_path,
|
||||||
cid,
|
container,
|
||||||
self.shell,
|
self.shell,
|
||||||
cluster,
|
cluster,
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Get object nodes"):
|
with reporter.step("Get object nodes"):
|
||||||
object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
|
object_storage_nodes = get_nodes_with_object(container, oid, self.shell, cluster.storage_nodes)
|
||||||
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
|
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
|
||||||
|
|
||||||
with reporter.step("Delete file, wait until gc remove object"):
|
with reporter.step("Delete file, wait until gc remove object"):
|
||||||
delete_object(default_wallet, cid, oid, self.shell, node.storage_node.get_rpc_endpoint())
|
delete_object(default_wallet, container, oid, self.shell, node.storage_node.get_rpc_endpoint())
|
||||||
|
|
||||||
with reporter.step(f"Check garbage collector metrics 'the counter should increase by {metrics_step}'"):
|
with reporter.step(f"Check garbage collector metrics 'the counter should increase by {metrics_step}'"):
|
||||||
for node in object_nodes:
|
for node in object_nodes:
|
||||||
|
|
|
@ -18,6 +18,8 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.utils.file_utils import generate_file
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
|
@pytest.mark.metrics
|
||||||
class TestGRPCMetrics(ClusterTestBase):
|
class TestGRPCMetrics(ClusterTestBase):
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def disable_policer(self, cluster_state_controller: ClusterStateController):
|
def disable_policer(self, cluster_state_controller: ClusterStateController):
|
||||||
|
@ -34,9 +36,7 @@ class TestGRPCMetrics(ClusterTestBase):
|
||||||
node = random.choice(cluster.cluster_nodes)
|
node = random.choice(cluster.cluster_nodes)
|
||||||
|
|
||||||
with reporter.step("Get current gRPC metrics for method 'Put'"):
|
with reporter.step("Get current gRPC metrics for method 'Put'"):
|
||||||
metrics_counter_put = get_metrics_value(
|
metrics_counter_put = get_metrics_value(node, command="grpc_server_handled_total", service="ContainerService", method="Put")
|
||||||
node, command="grpc_server_handled_total", service="ContainerService", method="Put"
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step(f"Create container with policy {placement_policy}"):
|
with reporter.step(f"Create container with policy {placement_policy}"):
|
||||||
cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)
|
cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)
|
||||||
|
@ -52,9 +52,7 @@ class TestGRPCMetrics(ClusterTestBase):
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Get current gRPC metrics for method 'Get'"):
|
with reporter.step("Get current gRPC metrics for method 'Get'"):
|
||||||
metrics_counter_get = get_metrics_value(
|
metrics_counter_get = get_metrics_value(node, command="grpc_server_handled_total", service="ContainerService", method="Get")
|
||||||
node, command="grpc_server_handled_total", service="ContainerService", method="Get"
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step(f"Get container"):
|
with reporter.step(f"Get container"):
|
||||||
get_container(default_wallet, cid, self.shell, node.storage_node.get_rpc_endpoint())
|
get_container(default_wallet, cid, self.shell, node.storage_node.get_rpc_endpoint())
|
||||||
|
@ -70,9 +68,7 @@ class TestGRPCMetrics(ClusterTestBase):
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Get current gRPC metrics for method 'List'"):
|
with reporter.step("Get current gRPC metrics for method 'List'"):
|
||||||
metrics_counter_list = get_metrics_value(
|
metrics_counter_list = get_metrics_value(node, command="grpc_server_handled_total", service="ContainerService", method="List")
|
||||||
node, command="grpc_server_handled_total", service="ContainerService", method="List"
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step(f"Get container list"):
|
with reporter.step(f"Get container list"):
|
||||||
list_containers(default_wallet, self.shell, node.storage_node.get_rpc_endpoint())
|
list_containers(default_wallet, self.shell, node.storage_node.get_rpc_endpoint())
|
||||||
|
@ -89,24 +85,18 @@ class TestGRPCMetrics(ClusterTestBase):
|
||||||
|
|
||||||
@allure.title("GRPC metrics object operations")
|
@allure.title("GRPC metrics object operations")
|
||||||
def test_grpc_metrics_object_operations(
|
def test_grpc_metrics_object_operations(
|
||||||
self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, disable_policer
|
self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, container: str, disable_policer
|
||||||
):
|
):
|
||||||
file_path = generate_file(simple_object_size.value)
|
file_path = generate_file(simple_object_size.value)
|
||||||
placement_policy = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
|
|
||||||
|
|
||||||
with reporter.step("Select random node"):
|
with reporter.step("Select random node"):
|
||||||
node = random.choice(cluster.cluster_nodes)
|
node = random.choice(cluster.cluster_nodes)
|
||||||
|
|
||||||
with reporter.step(f"Create container with policy {placement_policy}"):
|
|
||||||
cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)
|
|
||||||
|
|
||||||
with reporter.step("Get current gRPC metrics for method 'Put'"):
|
with reporter.step("Get current gRPC metrics for method 'Put'"):
|
||||||
metrics_counter_put = get_metrics_value(
|
metrics_counter_put = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Put")
|
||||||
node, command="grpc_server_handled_total", service="ObjectService", method="Put"
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step("Put object to selected node"):
|
with reporter.step("Put object to selected node"):
|
||||||
oid = put_object(default_wallet, file_path, cid, self.shell, node.storage_node.get_rpc_endpoint())
|
oid = put_object(default_wallet, file_path, container, self.shell, node.storage_node.get_rpc_endpoint())
|
||||||
|
|
||||||
with reporter.step(f"Check gRPC metrics method 'Put', 'the counter should increase by 1'"):
|
with reporter.step(f"Check gRPC metrics method 'Put', 'the counter should increase by 1'"):
|
||||||
metrics_counter_put += 1
|
metrics_counter_put += 1
|
||||||
|
@ -119,12 +109,10 @@ class TestGRPCMetrics(ClusterTestBase):
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Get current gRPC metrics for method 'Get'"):
|
with reporter.step("Get current gRPC metrics for method 'Get'"):
|
||||||
metrics_counter_get = get_metrics_value(
|
metrics_counter_get = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Get")
|
||||||
node, command="grpc_server_handled_total", service="ObjectService", method="Get"
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step(f"Get object"):
|
with reporter.step(f"Get object"):
|
||||||
get_object(default_wallet, cid, oid, self.shell, node.storage_node.get_rpc_endpoint())
|
get_object(default_wallet, container, oid, self.shell, node.storage_node.get_rpc_endpoint())
|
||||||
|
|
||||||
with reporter.step(f"Check gRPC metrics method=Get, 'the counter should increase by 1'"):
|
with reporter.step(f"Check gRPC metrics method=Get, 'the counter should increase by 1'"):
|
||||||
metrics_counter_get += 1
|
metrics_counter_get += 1
|
||||||
|
@ -137,12 +125,10 @@ class TestGRPCMetrics(ClusterTestBase):
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Get current gRPC metrics for method 'Search'"):
|
with reporter.step("Get current gRPC metrics for method 'Search'"):
|
||||||
metrics_counter_search = get_metrics_value(
|
metrics_counter_search = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Search")
|
||||||
node, command="grpc_server_handled_total", service="ObjectService", method="Search"
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step(f"Search object"):
|
with reporter.step(f"Search object"):
|
||||||
search_object(default_wallet, cid, self.shell, node.storage_node.get_rpc_endpoint())
|
search_object(default_wallet, container, self.shell, node.storage_node.get_rpc_endpoint())
|
||||||
|
|
||||||
with reporter.step(f"Check gRPC metrics method=Search, 'the counter should increase by 1'"):
|
with reporter.step(f"Check gRPC metrics method=Search, 'the counter should increase by 1'"):
|
||||||
metrics_counter_search += 1
|
metrics_counter_search += 1
|
||||||
|
@ -155,12 +141,10 @@ class TestGRPCMetrics(ClusterTestBase):
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Get current gRPC metrics for method 'Head'"):
|
with reporter.step("Get current gRPC metrics for method 'Head'"):
|
||||||
metrics_counter_head = get_metrics_value(
|
metrics_counter_head = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Head")
|
||||||
node, command="grpc_server_handled_total", service="ObjectService", method="Head"
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step(f"Head object"):
|
with reporter.step(f"Head object"):
|
||||||
head_object(default_wallet, cid, oid, self.shell, node.storage_node.get_rpc_endpoint())
|
head_object(default_wallet, container, oid, self.shell, node.storage_node.get_rpc_endpoint())
|
||||||
|
|
||||||
with reporter.step(f"Check gRPC metrics method=Head, 'the counter should increase by 1'"):
|
with reporter.step(f"Check gRPC metrics method=Head, 'the counter should increase by 1'"):
|
||||||
metrics_counter_head += 1
|
metrics_counter_head += 1
|
||||||
|
@ -178,9 +162,7 @@ class TestGRPCMetrics(ClusterTestBase):
|
||||||
node = random.choice(cluster.cluster_nodes)
|
node = random.choice(cluster.cluster_nodes)
|
||||||
|
|
||||||
with reporter.step("Get current gRPC metrics for Healthcheck"):
|
with reporter.step("Get current gRPC metrics for Healthcheck"):
|
||||||
metrics_counter = get_metrics_value(
|
metrics_counter = get_metrics_value(node, command="grpc_server_handled_total", service="TreeService", method="Healthcheck")
|
||||||
node, command="grpc_server_handled_total", service="TreeService", method="Healthcheck"
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step("Query Tree healthcheck status"):
|
with reporter.step("Query Tree healthcheck status"):
|
||||||
healthcheck.tree_healthcheck(node)
|
healthcheck.tree_healthcheck(node)
|
||||||
|
@ -206,9 +188,7 @@ class TestGRPCMetrics(ClusterTestBase):
|
||||||
cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)
|
cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)
|
||||||
|
|
||||||
with reporter.step("Get current gRPC metrics for Tree List"):
|
with reporter.step("Get current gRPC metrics for Tree List"):
|
||||||
metrics_counter = get_metrics_value(
|
metrics_counter = get_metrics_value(node, command="grpc_server_handled_total", service="TreeService", method="TreeList")
|
||||||
node, command="grpc_server_handled_total", service="TreeService", method="TreeList"
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step("Query Tree List"):
|
with reporter.step("Query Tree List"):
|
||||||
get_tree_list(default_wallet, cid, self.shell, node.storage_node.get_rpc_endpoint())
|
get_tree_list(default_wallet, cid, self.shell, node.storage_node.get_rpc_endpoint())
|
||||||
|
|
|
@ -6,7 +6,7 @@ import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.steps.metrics import get_metrics_value
|
from frostfs_testlib.steps.metrics import get_metrics_value
|
||||||
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
from frostfs_testlib.storage.cluster import ClusterNode
|
||||||
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
|
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
|
||||||
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
|
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
|
||||||
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
|
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
|
||||||
|
@ -14,6 +14,8 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.testing.test_control import wait_for_success
|
from frostfs_testlib.testing.test_control import wait_for_success
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
|
@pytest.mark.metrics
|
||||||
class TestLogsMetrics(ClusterTestBase):
|
class TestLogsMetrics(ClusterTestBase):
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def revert_all(self, cluster_state_controller: ClusterStateController):
|
def revert_all(self, cluster_state_controller: ClusterStateController):
|
||||||
|
@ -41,7 +43,7 @@ class TestLogsMetrics(ClusterTestBase):
|
||||||
logs = cluster_node.host.get_filtered_logs(
|
logs = cluster_node.host.get_filtered_logs(
|
||||||
log_level, unit="frostfs-storage", since=after_time, until=until_time, priority=log_priority
|
log_level, unit="frostfs-storage", since=after_time, until=until_time, priority=log_priority
|
||||||
)
|
)
|
||||||
result = re.findall(rf"Z\s+{log_level}\s+", logs)
|
result = re.findall(rf":\s+{log_level}\s+", logs)
|
||||||
count_logs += len(result)
|
count_logs += len(result)
|
||||||
except RuntimeError as e:
|
except RuntimeError as e:
|
||||||
...
|
...
|
||||||
|
|
|
@ -4,72 +4,73 @@ import re
|
||||||
import allure
|
import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.steps.cli.container import create_container, delete_container, search_nodes_with_container
|
from frostfs_testlib.steps.cli.container import delete_container, search_nodes_with_container
|
||||||
from frostfs_testlib.steps.cli.object import delete_object, lock_object, put_object, put_object_to_random_node
|
from frostfs_testlib.steps.cli.object import delete_object, lock_object, put_object, put_object_to_random_node
|
||||||
from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
|
from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
|
||||||
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
|
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
|
||||||
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
||||||
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
|
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
|
||||||
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.utils.file_utils import generate_file
|
from frostfs_testlib.utils.file_utils import TestFile
|
||||||
|
|
||||||
|
from ...helpers.container_request import PUBLIC_WITH_POLICY, ContainerRequest, requires_container
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
|
@pytest.mark.metrics
|
||||||
class TestObjectMetrics(ClusterTestBase):
|
class TestObjectMetrics(ClusterTestBase):
|
||||||
@allure.title("Object metrics of removed container (obj_size={object_size})")
|
@allure.title("Object metrics of removed container (obj_size={object_size})")
|
||||||
def test_object_metrics_removed_container(self, object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster):
|
@requires_container(PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X"))
|
||||||
file_path = generate_file(object_size.value)
|
def test_object_metrics_removed_container(self, default_wallet: WalletInfo, cluster: Cluster, container: str, test_file: TestFile):
|
||||||
placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
|
|
||||||
copies = 2
|
|
||||||
|
|
||||||
with reporter.step(f"Create container with policy {placement_policy}"):
|
|
||||||
cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)
|
|
||||||
|
|
||||||
with reporter.step("Put object to random node"):
|
with reporter.step("Put object to random node"):
|
||||||
oid = put_object_to_random_node(default_wallet, file_path, cid, self.shell, cluster)
|
oid = put_object_to_random_node(default_wallet, test_file.path, container, self.shell, cluster)
|
||||||
|
|
||||||
with reporter.step("Check metric appears in node where the object is located"):
|
with reporter.step("Check metric appears in node where the object is located"):
|
||||||
object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
|
object_storage_nodes = get_nodes_with_object(container, oid, self.shell, cluster.storage_nodes)
|
||||||
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
|
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
|
||||||
|
|
||||||
check_metrics_counter(
|
check_metrics_counter(
|
||||||
object_nodes,
|
object_nodes,
|
||||||
counter_exp=copies,
|
counter_exp=2,
|
||||||
command="frostfs_node_engine_container_objects_total",
|
command="frostfs_node_engine_container_objects_total",
|
||||||
cid=cid,
|
cid=container,
|
||||||
type="user",
|
type="user",
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Delete container"):
|
with reporter.step("Delete container"):
|
||||||
delete_container(default_wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
|
delete_container(default_wallet, container, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
|
||||||
|
|
||||||
with reporter.step("Tick Epoch"):
|
with reporter.step("Tick Epoch"):
|
||||||
self.tick_epochs(epochs_to_tick=2, wait_block=2)
|
self.tick_epochs(epochs_to_tick=2, wait_block=2)
|
||||||
|
|
||||||
with reporter.step("Check metrics of removed containers doesn't appear in the storage node"):
|
with reporter.step("Check metrics of removed containers doesn't appear in the storage node"):
|
||||||
check_metrics_counter(object_nodes, counter_exp=0, command="frostfs_node_engine_container_objects_total", cid=cid, type="user")
|
check_metrics_counter(
|
||||||
check_metrics_counter(object_nodes, counter_exp=0, command="frostfs_node_engine_container_size_byte", cid=cid)
|
object_nodes, counter_exp=0, command="frostfs_node_engine_container_objects_total", cid=container, type="user"
|
||||||
|
)
|
||||||
|
check_metrics_counter(object_nodes, counter_exp=0, command="frostfs_node_engine_container_size_byte", cid=container)
|
||||||
|
|
||||||
for node in object_nodes:
|
for node in object_nodes:
|
||||||
all_metrics = node.metrics.storage.get_metrics_search_by_greps(command="frostfs_node_engine_container_size_byte")
|
all_metrics = node.metrics.storage.get_metrics_search_by_greps(command="frostfs_node_engine_container_size_byte")
|
||||||
assert cid not in all_metrics.stdout, "metrics of removed containers shouldn't appear in the storage node"
|
assert container not in all_metrics.stdout, "metrics of removed containers shouldn't appear in the storage node"
|
||||||
|
|
||||||
@allure.title("Object metrics, locked object (obj_size={object_size}, policy={placement_policy})")
|
@allure.title("Object metrics, locked object (obj_size={object_size}, policy={container_request})")
|
||||||
@pytest.mark.parametrize("placement_policy", ["REP 1 IN X CBF 1 SELECT 1 FROM * AS X", "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"])
|
@requires_container(
|
||||||
|
[
|
||||||
|
PUBLIC_WITH_POLICY("REP 1 IN X CBF 1 SELECT 1 FROM * AS X", short_name="REP 1"),
|
||||||
|
PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP 2"),
|
||||||
|
]
|
||||||
|
)
|
||||||
def test_object_metrics_blocked_object(
|
def test_object_metrics_blocked_object(
|
||||||
self, object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, placement_policy: str
|
self, default_wallet: WalletInfo, cluster: Cluster, container: str, container_request: ContainerRequest, test_file: TestFile
|
||||||
):
|
):
|
||||||
file_path = generate_file(object_size.value)
|
metric_step = int(re.search(r"REP\s(\d+)", container_request.policy).group(1))
|
||||||
metric_step = int(re.search(r"REP\s(\d+)", placement_policy).group(1))
|
|
||||||
|
|
||||||
with reporter.step(f"Create container with policy {placement_policy}"):
|
|
||||||
cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)
|
|
||||||
|
|
||||||
with reporter.step("Search container nodes"):
|
with reporter.step("Search container nodes"):
|
||||||
container_nodes = search_nodes_with_container(
|
container_nodes = search_nodes_with_container(
|
||||||
wallet=default_wallet,
|
wallet=default_wallet,
|
||||||
cid=cid,
|
cid=container,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
endpoint=self.cluster.default_rpc_endpoint,
|
||||||
cluster=cluster,
|
cluster=cluster,
|
||||||
|
@ -81,7 +82,7 @@ class TestObjectMetrics(ClusterTestBase):
|
||||||
objects_metric_counter += get_metrics_value(node, command="frostfs_node_engine_objects_total", type="user")
|
objects_metric_counter += get_metrics_value(node, command="frostfs_node_engine_objects_total", type="user")
|
||||||
|
|
||||||
with reporter.step("Put object to container node"):
|
with reporter.step("Put object to container node"):
|
||||||
oid = put_object(default_wallet, file_path, cid, self.shell, container_nodes[0].storage_node.get_rpc_endpoint())
|
oid = put_object(default_wallet, test_file.path, container, self.shell, container_nodes[0].storage_node.get_rpc_endpoint())
|
||||||
|
|
||||||
with reporter.step(f"Check metric user 'the counter should increase by {metric_step}'"):
|
with reporter.step(f"Check metric user 'the counter should increase by {metric_step}'"):
|
||||||
objects_metric_counter += metric_step
|
objects_metric_counter += metric_step
|
||||||
|
@ -95,12 +96,12 @@ class TestObjectMetrics(ClusterTestBase):
|
||||||
container_nodes,
|
container_nodes,
|
||||||
counter_exp=metric_step,
|
counter_exp=metric_step,
|
||||||
command="frostfs_node_engine_container_objects_total",
|
command="frostfs_node_engine_container_objects_total",
|
||||||
cid=cid,
|
cid=container,
|
||||||
type="user",
|
type="user",
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Delete object"):
|
with reporter.step("Delete object"):
|
||||||
delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
|
delete_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
|
||||||
|
|
||||||
with reporter.step(f"Check metric user 'the counter should decrease by {metric_step}'"):
|
with reporter.step(f"Check metric user 'the counter should decrease by {metric_step}'"):
|
||||||
objects_metric_counter -= metric_step
|
objects_metric_counter -= metric_step
|
||||||
|
@ -114,16 +115,16 @@ class TestObjectMetrics(ClusterTestBase):
|
||||||
container_nodes,
|
container_nodes,
|
||||||
counter_exp=0,
|
counter_exp=0,
|
||||||
command="frostfs_node_engine_container_objects_total",
|
command="frostfs_node_engine_container_objects_total",
|
||||||
cid=cid,
|
cid=container,
|
||||||
type="user",
|
type="user",
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Put object and lock it to next epoch"):
|
with reporter.step("Put object and lock it to next epoch"):
|
||||||
oid = put_object(default_wallet, file_path, cid, self.shell, container_nodes[0].storage_node.get_rpc_endpoint())
|
oid = put_object(default_wallet, test_file.path, container, self.shell, container_nodes[0].storage_node.get_rpc_endpoint())
|
||||||
current_epoch = self.get_epoch()
|
current_epoch = self.get_epoch()
|
||||||
lock_object(
|
lock_object(
|
||||||
default_wallet,
|
default_wallet,
|
||||||
cid,
|
container,
|
||||||
oid,
|
oid,
|
||||||
self.shell,
|
self.shell,
|
||||||
container_nodes[0].storage_node.get_rpc_endpoint(),
|
container_nodes[0].storage_node.get_rpc_endpoint(),
|
||||||
|
@ -142,7 +143,7 @@ class TestObjectMetrics(ClusterTestBase):
|
||||||
container_nodes,
|
container_nodes,
|
||||||
counter_exp=metric_step,
|
counter_exp=metric_step,
|
||||||
command="frostfs_node_engine_container_objects_total",
|
command="frostfs_node_engine_container_objects_total",
|
||||||
cid=cid,
|
cid=container,
|
||||||
type="user",
|
type="user",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -156,7 +157,7 @@ class TestObjectMetrics(ClusterTestBase):
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Delete object"):
|
with reporter.step("Delete object"):
|
||||||
delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
|
delete_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
|
||||||
|
|
||||||
with reporter.step(f"Check metric user 'the counter should decrease by {metric_step}'"):
|
with reporter.step(f"Check metric user 'the counter should decrease by {metric_step}'"):
|
||||||
objects_metric_counter -= metric_step
|
objects_metric_counter -= metric_step
|
||||||
|
@ -170,7 +171,7 @@ class TestObjectMetrics(ClusterTestBase):
|
||||||
container_nodes,
|
container_nodes,
|
||||||
counter_exp=0,
|
counter_exp=0,
|
||||||
command="frostfs_node_engine_container_objects_total",
|
command="frostfs_node_engine_container_objects_total",
|
||||||
cid=cid,
|
cid=container,
|
||||||
type="user",
|
type="user",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -178,8 +179,8 @@ class TestObjectMetrics(ClusterTestBase):
|
||||||
current_epoch = self.get_epoch()
|
current_epoch = self.get_epoch()
|
||||||
oid = put_object(
|
oid = put_object(
|
||||||
default_wallet,
|
default_wallet,
|
||||||
file_path,
|
test_file.path,
|
||||||
cid,
|
container,
|
||||||
self.shell,
|
self.shell,
|
||||||
container_nodes[0].storage_node.get_rpc_endpoint(),
|
container_nodes[0].storage_node.get_rpc_endpoint(),
|
||||||
expire_at=current_epoch + 1,
|
expire_at=current_epoch + 1,
|
||||||
|
@ -197,7 +198,7 @@ class TestObjectMetrics(ClusterTestBase):
|
||||||
container_nodes,
|
container_nodes,
|
||||||
counter_exp=metric_step,
|
counter_exp=metric_step,
|
||||||
command="frostfs_node_engine_container_objects_total",
|
command="frostfs_node_engine_container_objects_total",
|
||||||
cid=cid,
|
cid=container,
|
||||||
type="user",
|
type="user",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -216,31 +217,28 @@ class TestObjectMetrics(ClusterTestBase):
|
||||||
container_nodes,
|
container_nodes,
|
||||||
counter_exp=0,
|
counter_exp=0,
|
||||||
command="frostfs_node_engine_container_objects_total",
|
command="frostfs_node_engine_container_objects_total",
|
||||||
cid=cid,
|
cid=container,
|
||||||
type="user",
|
type="user",
|
||||||
)
|
)
|
||||||
|
|
||||||
@allure.title("Object metrics, stop the node (obj_size={object_size})")
|
@allure.title("Object metrics, stop the node (obj_size={object_size})")
|
||||||
|
@requires_container(PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP 2"))
|
||||||
def test_object_metrics_stop_node(
|
def test_object_metrics_stop_node(
|
||||||
self,
|
self,
|
||||||
object_size: ObjectSize,
|
|
||||||
default_wallet: WalletInfo,
|
default_wallet: WalletInfo,
|
||||||
cluster_state_controller: ClusterStateController,
|
cluster_state_controller: ClusterStateController,
|
||||||
|
container: str,
|
||||||
|
test_file: TestFile,
|
||||||
):
|
):
|
||||||
placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
|
|
||||||
file_path = generate_file(object_size.value)
|
|
||||||
copies = 2
|
copies = 2
|
||||||
|
|
||||||
with reporter.step(f"Create container with policy {placement_policy}"):
|
|
||||||
cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, placement_policy)
|
|
||||||
|
|
||||||
with reporter.step(f"Check object metrics in container 'should be zero'"):
|
with reporter.step(f"Check object metrics in container 'should be zero'"):
|
||||||
check_metrics_counter(
|
check_metrics_counter(
|
||||||
self.cluster.cluster_nodes,
|
self.cluster.cluster_nodes,
|
||||||
counter_exp=0,
|
counter_exp=0,
|
||||||
command="frostfs_node_engine_container_objects_total",
|
command="frostfs_node_engine_container_objects_total",
|
||||||
type="user",
|
type="user",
|
||||||
cid=cid,
|
cid=container,
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Get current metrics for each nodes"):
|
with reporter.step("Get current metrics for each nodes"):
|
||||||
|
@ -249,10 +247,10 @@ class TestObjectMetrics(ClusterTestBase):
|
||||||
objects_metric_counter[node] = get_metrics_value(node, command="frostfs_node_engine_objects_total", type="user")
|
objects_metric_counter[node] = get_metrics_value(node, command="frostfs_node_engine_objects_total", type="user")
|
||||||
|
|
||||||
with reporter.step("Put object"):
|
with reporter.step("Put object"):
|
||||||
oid = put_object(default_wallet, file_path, cid, self.shell, self.cluster.default_rpc_endpoint)
|
oid = put_object(default_wallet, test_file.path, container, self.shell, self.cluster.default_rpc_endpoint)
|
||||||
|
|
||||||
with reporter.step("Get object nodes"):
|
with reporter.step("Get object nodes"):
|
||||||
object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, self.cluster.storage_nodes)
|
object_storage_nodes = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
|
||||||
object_nodes = [
|
object_nodes = [
|
||||||
cluster_node for cluster_node in self.cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes
|
cluster_node for cluster_node in self.cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes
|
||||||
]
|
]
|
||||||
|
@ -265,7 +263,7 @@ class TestObjectMetrics(ClusterTestBase):
|
||||||
counter_exp=copies,
|
counter_exp=copies,
|
||||||
command="frostfs_node_engine_container_objects_total",
|
command="frostfs_node_engine_container_objects_total",
|
||||||
type="user",
|
type="user",
|
||||||
cid=cid,
|
cid=container,
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step(f"Select node to stop"):
|
with reporter.step(f"Select node to stop"):
|
||||||
|
@ -289,5 +287,5 @@ class TestObjectMetrics(ClusterTestBase):
|
||||||
counter_exp=copies,
|
counter_exp=copies,
|
||||||
command="frostfs_node_engine_container_objects_total",
|
command="frostfs_node_engine_container_objects_total",
|
||||||
type="user",
|
type="user",
|
||||||
cid=cid,
|
cid=container,
|
||||||
)
|
)
|
||||||
|
|
|
@ -5,8 +5,6 @@ import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
|
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
|
||||||
from frostfs_testlib.resources.wellknown_acl import EACL_PUBLIC_READ_WRITE
|
|
||||||
from frostfs_testlib.steps.cli.container import create_container
|
|
||||||
from frostfs_testlib.steps.cli.object import get_object, put_object
|
from frostfs_testlib.steps.cli.object import get_object, put_object
|
||||||
from frostfs_testlib.steps.metrics import check_metrics_counter
|
from frostfs_testlib.steps.metrics import check_metrics_counter
|
||||||
from frostfs_testlib.steps.node_management import node_shard_list, node_shard_set_mode
|
from frostfs_testlib.steps.node_management import node_shard_list, node_shard_set_mode
|
||||||
|
@ -18,7 +16,11 @@ from frostfs_testlib.testing import parallel, wait_for_success
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.utils.file_utils import generate_file
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
|
from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
|
@pytest.mark.metrics
|
||||||
class TestShardMetrics(ClusterTestBase):
|
class TestShardMetrics(ClusterTestBase):
|
||||||
@pytest.fixture()
|
@pytest.fixture()
|
||||||
@allure.title("Get two shards for set mode")
|
@allure.title("Get two shards for set mode")
|
||||||
|
@ -73,9 +75,7 @@ class TestShardMetrics(ClusterTestBase):
|
||||||
data_path = node.storage_node.get_data_directory()
|
data_path = node.storage_node.get_data_directory()
|
||||||
all_datas = node_shell.exec(f"ls -la {data_path}/data | awk '{{ print $9 }}'").stdout.strip()
|
all_datas = node_shell.exec(f"ls -la {data_path}/data | awk '{{ print $9 }}'").stdout.strip()
|
||||||
for data_dir in all_datas.replace(".", "").strip().split("\n"):
|
for data_dir in all_datas.replace(".", "").strip().split("\n"):
|
||||||
check_dir = node_shell.exec(
|
check_dir = node_shell.exec(f" [ -d {data_path}/data/{data_dir}/data/{oid_path} ] && echo 1 || echo 0").stdout
|
||||||
f" [ -d {data_path}/data/{data_dir}/data/{oid_path} ] && echo 1 || echo 0"
|
|
||||||
).stdout
|
|
||||||
if "1" in check_dir:
|
if "1" in check_dir:
|
||||||
object_path = f"{data_path}/data/{data_dir}/data/{oid_path}"
|
object_path = f"{data_path}/data/{data_dir}/data/{oid_path}"
|
||||||
object_name = f"{oid[4:]}.{cid}"
|
object_name = f"{oid[4:]}.{cid}"
|
||||||
|
@ -128,34 +128,22 @@ class TestShardMetrics(ClusterTestBase):
|
||||||
)
|
)
|
||||||
|
|
||||||
@allure.title("Metric for error count on shard")
|
@allure.title("Metric for error count on shard")
|
||||||
|
@requires_container(PUBLIC_WITH_POLICY("REP 1 CBF 1", short_name="REP 1"))
|
||||||
def test_shard_metrics_error_count(
|
def test_shard_metrics_error_count(
|
||||||
self, max_object_size: int, default_wallet: WalletInfo, cluster: Cluster, revert_all_shards_mode
|
self, max_object_size: int, default_wallet: WalletInfo, cluster: Cluster, container: str, revert_all_shards_mode
|
||||||
):
|
):
|
||||||
file_path = generate_file(round(max_object_size * 0.8))
|
file_path = generate_file(round(max_object_size * 0.8))
|
||||||
|
|
||||||
with reporter.step(f"Create container"):
|
|
||||||
cid = create_container(
|
|
||||||
wallet=default_wallet,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=cluster.default_rpc_endpoint,
|
|
||||||
rule="REP 1 CBF 1",
|
|
||||||
basic_acl=EACL_PUBLIC_READ_WRITE,
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step("Put object"):
|
with reporter.step("Put object"):
|
||||||
oid = put_object(default_wallet, file_path, cid, self.shell, cluster.default_rpc_endpoint)
|
oid = put_object(default_wallet, file_path, container, self.shell, cluster.default_rpc_endpoint)
|
||||||
|
|
||||||
with reporter.step("Get object nodes"):
|
with reporter.step("Get object nodes"):
|
||||||
object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
|
object_storage_nodes = get_nodes_with_object(container, oid, self.shell, cluster.storage_nodes)
|
||||||
object_nodes = [
|
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
|
||||||
cluster_node
|
|
||||||
for cluster_node in cluster.cluster_nodes
|
|
||||||
if cluster_node.storage_node in object_storage_nodes
|
|
||||||
]
|
|
||||||
node = random.choice(object_nodes)
|
node = random.choice(object_nodes)
|
||||||
|
|
||||||
with reporter.step("Search object in system."):
|
with reporter.step("Search object in system."):
|
||||||
object_path, object_name = self.get_object_path_and_name_file(oid, cid, node)
|
object_path, object_name = self.get_object_path_and_name_file(oid, container, node)
|
||||||
|
|
||||||
with reporter.step("Block read file"):
|
with reporter.step("Block read file"):
|
||||||
node.host.get_shell().exec(f"chmod a-r {object_path}/{object_name}")
|
node.host.get_shell().exec(f"chmod a-r {object_path}/{object_name}")
|
||||||
|
@ -164,7 +152,7 @@ class TestShardMetrics(ClusterTestBase):
|
||||||
with pytest.raises(RuntimeError, match=OBJECT_NOT_FOUND):
|
with pytest.raises(RuntimeError, match=OBJECT_NOT_FOUND):
|
||||||
get_object(
|
get_object(
|
||||||
wallet=default_wallet,
|
wallet=default_wallet,
|
||||||
cid=cid,
|
cid=container,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
endpoint=node.storage_node.get_rpc_endpoint(),
|
endpoint=node.storage_node.get_rpc_endpoint(),
|
||||||
|
|
0
pytest_tests/testsuites/object/__init__.py
Normal file
0
pytest_tests/testsuites/object/__init__.py
Normal file
|
@ -5,6 +5,7 @@ import sys
|
||||||
import allure
|
import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
|
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
|
||||||
from frostfs_testlib.resources.error_patterns import (
|
from frostfs_testlib.resources.error_patterns import (
|
||||||
INVALID_LENGTH_SPECIFIER,
|
INVALID_LENGTH_SPECIFIER,
|
||||||
INVALID_OFFSET_SPECIFIER,
|
INVALID_OFFSET_SPECIFIER,
|
||||||
|
@ -14,7 +15,7 @@ from frostfs_testlib.resources.error_patterns import (
|
||||||
OUT_OF_RANGE,
|
OUT_OF_RANGE,
|
||||||
)
|
)
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container
|
from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE, search_nodes_with_container
|
||||||
from frostfs_testlib.steps.cli.object import (
|
from frostfs_testlib.steps.cli.object import (
|
||||||
get_object_from_random_node,
|
get_object_from_random_node,
|
||||||
get_range,
|
get_range,
|
||||||
|
@ -33,12 +34,16 @@ from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
|
||||||
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
|
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.utils.file_utils import generate_file, get_file_content, get_file_hash
|
from frostfs_testlib.utils.file_utils import TestFile, get_file_content, get_file_hash
|
||||||
|
|
||||||
|
from ...helpers.container_creation import create_container_with_ape
|
||||||
|
from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
CLEANUP_TIMEOUT = 10
|
CLEANUP_TIMEOUT = 10
|
||||||
COMMON_ATTRIBUTE = {"common_key": "common_value"}
|
COMMON_ATTRIBUTE = {"common_key": "common_value"}
|
||||||
|
COMMON_CONTAINER_RULE = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
|
||||||
# Will upload object for each attribute set
|
# Will upload object for each attribute set
|
||||||
OBJECT_ATTRIBUTES = [
|
OBJECT_ATTRIBUTES = [
|
||||||
None,
|
None,
|
||||||
|
@ -54,9 +59,7 @@ RANGE_MAX_LEN = 500
|
||||||
STATIC_RANGES = {}
|
STATIC_RANGES = {}
|
||||||
|
|
||||||
|
|
||||||
def generate_ranges(
|
def generate_ranges(storage_object: StorageObjectInfo, max_object_size: int, shell: Shell, cluster: Cluster) -> list[(int, int)]:
|
||||||
storage_object: StorageObjectInfo, max_object_size: int, shell: Shell, cluster: Cluster
|
|
||||||
) -> list[(int, int)]:
|
|
||||||
file_range_step = storage_object.size / RANGES_COUNT
|
file_range_step = storage_object.size / RANGES_COUNT
|
||||||
|
|
||||||
file_ranges = []
|
file_ranges = []
|
||||||
|
@ -82,7 +85,7 @@ def generate_ranges(
|
||||||
|
|
||||||
for offset, length in file_ranges:
|
for offset, length in file_ranges:
|
||||||
range_length = random.randint(RANGE_MIN_LEN, RANGE_MAX_LEN)
|
range_length = random.randint(RANGE_MIN_LEN, RANGE_MAX_LEN)
|
||||||
range_start = random.randint(offset, offset + length)
|
range_start = random.randint(offset, offset + length - 1)
|
||||||
|
|
||||||
file_ranges_to_test.append((range_start, min(range_length, storage_object.size - range_start)))
|
file_ranges_to_test.append((range_start, min(range_length, storage_object.size - range_start)))
|
||||||
|
|
||||||
|
@ -92,21 +95,20 @@ def generate_ranges(
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
@pytest.fixture(scope="module")
|
||||||
def common_container(default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster) -> str:
|
def common_container(
|
||||||
rule = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
|
frostfs_cli: FrostfsCli,
|
||||||
with reporter.step(f"Create container with {rule} and put object"):
|
default_wallet: WalletInfo,
|
||||||
cid = create_container(default_wallet, client_shell, cluster.default_rpc_endpoint, rule)
|
client_shell: Shell,
|
||||||
|
cluster: Cluster,
|
||||||
return cid
|
rpc_endpoint: str,
|
||||||
|
) -> str:
|
||||||
|
container_request = ContainerRequest(COMMON_CONTAINER_RULE, APE_EVERYONE_ALLOW_ALL)
|
||||||
|
return create_container_with_ape(container_request, frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint)
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
@pytest.fixture(scope="module")
|
||||||
def container_nodes(
|
def container_nodes(default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, common_container: str) -> list[ClusterNode]:
|
||||||
default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, common_container: str
|
return search_nodes_with_container(default_wallet, common_container, client_shell, cluster.default_rpc_endpoint, cluster)
|
||||||
) -> list[ClusterNode]:
|
|
||||||
return search_nodes_with_container(
|
|
||||||
default_wallet, common_container, client_shell, cluster.default_rpc_endpoint, cluster
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
@pytest.fixture(scope="module")
|
||||||
|
@ -123,35 +125,35 @@ def storage_objects(
|
||||||
client_shell: Shell,
|
client_shell: Shell,
|
||||||
cluster: Cluster,
|
cluster: Cluster,
|
||||||
object_size: ObjectSize,
|
object_size: ObjectSize,
|
||||||
|
test_file_module: TestFile,
|
||||||
|
frostfs_cli: FrostfsCli,
|
||||||
placement_policy: PlacementPolicy,
|
placement_policy: PlacementPolicy,
|
||||||
|
rpc_endpoint: str,
|
||||||
) -> list[StorageObjectInfo]:
|
) -> list[StorageObjectInfo]:
|
||||||
wallet = default_wallet
|
cid = create_container_with_ape(
|
||||||
# Separate containers for complex/simple objects to avoid side-effects
|
ContainerRequest(placement_policy.value, APE_EVERYONE_ALLOW_ALL), frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint
|
||||||
cid = create_container(
|
|
||||||
wallet, shell=client_shell, rule=placement_policy.value, endpoint=cluster.default_rpc_endpoint
|
|
||||||
)
|
)
|
||||||
|
|
||||||
file_path = generate_file(object_size.value)
|
with reporter.step("Generate file"):
|
||||||
file_hash = get_file_hash(file_path)
|
file_hash = get_file_hash(test_file_module.path)
|
||||||
|
|
||||||
storage_objects = []
|
storage_objects = []
|
||||||
|
|
||||||
with reporter.step("Put objects"):
|
with reporter.step("Put objects"):
|
||||||
# We need to upload objects multiple times with different attributes
|
# We need to upload objects multiple times with different attributes
|
||||||
for attributes in OBJECT_ATTRIBUTES:
|
for attributes in OBJECT_ATTRIBUTES:
|
||||||
storage_object_id = put_object_to_random_node(
|
storage_object_id = put_object_to_random_node(
|
||||||
wallet=wallet,
|
default_wallet,
|
||||||
path=file_path,
|
test_file_module.path,
|
||||||
cid=cid,
|
cid,
|
||||||
shell=client_shell,
|
client_shell,
|
||||||
cluster=cluster,
|
cluster,
|
||||||
attributes=attributes,
|
attributes=attributes,
|
||||||
)
|
)
|
||||||
|
|
||||||
storage_object = StorageObjectInfo(cid, storage_object_id)
|
storage_object = StorageObjectInfo(cid, storage_object_id)
|
||||||
storage_object.size = object_size.value
|
storage_object.size = object_size.value
|
||||||
storage_object.wallet = wallet
|
storage_object.wallet = default_wallet
|
||||||
storage_object.file_path = file_path
|
storage_object.file_path = test_file_module.path
|
||||||
storage_object.file_hash = file_hash
|
storage_object.file_hash = file_hash
|
||||||
storage_object.attributes = attributes
|
storage_object.attributes = attributes
|
||||||
|
|
||||||
|
@ -170,6 +172,7 @@ def expected_object_copies(placement_policy: PlacementPolicy) -> int:
|
||||||
return 4
|
return 4
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.sanity
|
@pytest.mark.sanity
|
||||||
@pytest.mark.grpc_api
|
@pytest.mark.grpc_api
|
||||||
class TestObjectApi(ClusterTestBase):
|
class TestObjectApi(ClusterTestBase):
|
||||||
|
@ -248,23 +251,26 @@ class TestObjectApi(ClusterTestBase):
|
||||||
)
|
)
|
||||||
self.check_header_is_presented(head_info, storage_object_2.attributes)
|
self.check_header_is_presented(head_info, storage_object_2.attributes)
|
||||||
|
|
||||||
@allure.title("Head deleted object with --raw arg (obj_size={object_size}, policy={placement_policy})")
|
@allure.title("Head deleted object with --raw arg (obj_size={object_size}, policy={container_request})")
|
||||||
def test_object_head_raw(self, default_wallet: str, object_size: ObjectSize, placement_policy: PlacementPolicy):
|
@pytest.mark.parametrize(
|
||||||
with reporter.step("Create container"):
|
"container_request",
|
||||||
cid = create_container(
|
[
|
||||||
default_wallet, self.shell, self.cluster.default_rpc_endpoint, placement_policy.value
|
ContainerRequest(DEFAULT_PLACEMENT_RULE, APE_EVERYONE_ALLOW_ALL, "rep"),
|
||||||
)
|
ContainerRequest(DEFAULT_EC_PLACEMENT_RULE, APE_EVERYONE_ALLOW_ALL, "ec"),
|
||||||
|
],
|
||||||
|
indirect=True,
|
||||||
|
ids=["rep", "ec"],
|
||||||
|
)
|
||||||
|
def test_object_head_raw(self, default_wallet: str, container: str, test_file: TestFile):
|
||||||
with reporter.step("Upload object"):
|
with reporter.step("Upload object"):
|
||||||
file_path = generate_file(object_size.value)
|
oid = put_object_to_random_node(default_wallet, test_file.path, container, self.shell, self.cluster)
|
||||||
oid = put_object_to_random_node(default_wallet, file_path, cid, self.shell, self.cluster)
|
|
||||||
|
|
||||||
with reporter.step("Delete object"):
|
with reporter.step("Delete object"):
|
||||||
delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
|
delete_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
|
||||||
|
|
||||||
with reporter.step("Call object head --raw and expect error"):
|
with reporter.step("Call object head --raw and expect error"):
|
||||||
with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED):
|
with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED):
|
||||||
head_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint, is_raw=True)
|
head_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint, is_raw=True)
|
||||||
|
|
||||||
@allure.title("Search objects by native API (obj_size={object_size}, policy={placement_policy})")
|
@allure.title("Search objects by native API (obj_size={object_size}, policy={placement_policy})")
|
||||||
def test_search_object_api(self, storage_objects: list[StorageObjectInfo]):
|
def test_search_object_api(self, storage_objects: list[StorageObjectInfo]):
|
||||||
|
@ -308,57 +314,51 @@ class TestObjectApi(ClusterTestBase):
|
||||||
assert sorted(expected_oids) == sorted(result)
|
assert sorted(expected_oids) == sorted(result)
|
||||||
|
|
||||||
@allure.title("Search objects with removed items (obj_size={object_size})")
|
@allure.title("Search objects with removed items (obj_size={object_size})")
|
||||||
def test_object_search_should_return_tombstone_items(self, default_wallet: WalletInfo, object_size: ObjectSize):
|
def test_object_search_should_return_tombstone_items(
|
||||||
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
container: str,
|
||||||
|
object_size: ObjectSize,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
test_file: TestFile,
|
||||||
|
):
|
||||||
"""
|
"""
|
||||||
Validate object search with removed items
|
Validate object search with removed items
|
||||||
"""
|
"""
|
||||||
|
|
||||||
wallet = default_wallet
|
with reporter.step("Put object to container"):
|
||||||
cid = create_container(wallet, self.shell, self.cluster.default_rpc_endpoint)
|
|
||||||
|
|
||||||
with reporter.step("Upload file"):
|
|
||||||
file_path = generate_file(object_size.value)
|
|
||||||
file_hash = get_file_hash(file_path)
|
|
||||||
|
|
||||||
storage_object = StorageObjectInfo(
|
storage_object = StorageObjectInfo(
|
||||||
cid=cid,
|
cid=container,
|
||||||
oid=put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster),
|
oid=put_object_to_random_node(default_wallet, test_file.path, container, self.shell, self.cluster),
|
||||||
size=object_size.value,
|
size=object_size.value,
|
||||||
wallet=wallet,
|
wallet=default_wallet,
|
||||||
file_path=file_path,
|
file_path=test_file.path,
|
||||||
file_hash=file_hash,
|
file_hash=get_file_hash(test_file.path),
|
||||||
)
|
)
|
||||||
|
|
||||||
with reporter.step("Search object"):
|
with reporter.step("Search object"):
|
||||||
# Root Search object should return root object oid
|
# Root Search object should return root object oid
|
||||||
result = search_object(wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, root=True)
|
result = search_object(default_wallet, container, self.shell, rpc_endpoint, root=True)
|
||||||
assert result == [storage_object.oid]
|
objects_before_deletion = len(result)
|
||||||
|
assert storage_object.oid in result
|
||||||
|
|
||||||
with reporter.step("Delete file"):
|
with reporter.step("Delete object"):
|
||||||
delete_objects([storage_object], self.shell, self.cluster)
|
delete_objects([storage_object], self.shell, self.cluster)
|
||||||
|
|
||||||
with reporter.step("Search deleted object with --root"):
|
with reporter.step("Search deleted object with --root"):
|
||||||
# Root Search object should return nothing
|
# Root Search object should return nothing
|
||||||
result = search_object(wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, root=True)
|
result = search_object(default_wallet, container, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, root=True)
|
||||||
assert len(result) == 0
|
assert len(result) == 0
|
||||||
|
|
||||||
with reporter.step("Search deleted object with --phy should return only tombstones"):
|
with reporter.step("Search deleted object with --phy should return only tombstones"):
|
||||||
# Physical Search object should return only tombstones
|
# Physical Search object should return only tombstones
|
||||||
result = search_object(wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, phy=True)
|
result = search_object(default_wallet, container, self.shell, rpc_endpoint, phy=True)
|
||||||
assert storage_object.tombstone in result, "Search result should contain tombstone of removed object"
|
assert storage_object.tombstone in result, "Search result should contain tombstone of removed object"
|
||||||
assert storage_object.oid not in result, "Search result should not contain ObjectId of removed object"
|
assert storage_object.oid not in result, "Search result should not contain ObjectId of removed object"
|
||||||
for tombstone_oid in result:
|
for tombstone_oid in result:
|
||||||
header = head_object(
|
head_info = head_object(default_wallet, container, tombstone_oid, self.shell, rpc_endpoint)
|
||||||
wallet,
|
object_type = head_info["header"]["objectType"]
|
||||||
cid,
|
assert object_type == "TOMBSTONE", f"Object wasn't deleted properly. Found object {tombstone_oid} with type {object_type}"
|
||||||
tombstone_oid,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
)["header"]
|
|
||||||
object_type = header["objectType"]
|
|
||||||
assert (
|
|
||||||
object_type == "TOMBSTONE"
|
|
||||||
), f"Object wasn't deleted properly. Found object {tombstone_oid} with type {object_type}"
|
|
||||||
|
|
||||||
@allure.title("Get range hash by native API (obj_size={object_size}, policy={placement_policy})")
|
@allure.title("Get range hash by native API (obj_size={object_size}, policy={placement_policy})")
|
||||||
@pytest.mark.grpc_api
|
@pytest.mark.grpc_api
|
||||||
|
@ -419,8 +419,7 @@ class TestObjectApi(ClusterTestBase):
|
||||||
range_cut=range_cut,
|
range_cut=range_cut,
|
||||||
)
|
)
|
||||||
assert (
|
assert (
|
||||||
get_file_content(file_path, content_len=range_len, mode="rb", offset=range_start)
|
get_file_content(file_path, content_len=range_len, mode="rb", offset=range_start) == range_content
|
||||||
== range_content
|
|
||||||
), f"Expected range content to match {range_cut} slice of file payload"
|
), f"Expected range content to match {range_cut} slice of file payload"
|
||||||
|
|
||||||
@allure.title("[NEGATIVE] Get invalid range by native API (obj_size={object_size}, policy={placement_policy})")
|
@allure.title("[NEGATIVE] Get invalid range by native API (obj_size={object_size}, policy={placement_policy})")
|
||||||
|
@ -438,9 +437,7 @@ class TestObjectApi(ClusterTestBase):
|
||||||
oids = [storage_object.oid for storage_object in storage_objects[:2]]
|
oids = [storage_object.oid for storage_object in storage_objects[:2]]
|
||||||
file_size = storage_objects[0].size
|
file_size = storage_objects[0].size
|
||||||
|
|
||||||
assert (
|
assert RANGE_MIN_LEN < file_size, f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
|
||||||
RANGE_MIN_LEN < file_size
|
|
||||||
), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
|
|
||||||
|
|
||||||
file_ranges_to_test: list[tuple(int, int, str)] = [
|
file_ranges_to_test: list[tuple(int, int, str)] = [
|
||||||
# Offset is bigger than the file size, the length is small.
|
# Offset is bigger than the file size, the length is small.
|
||||||
|
@ -485,9 +482,7 @@ class TestObjectApi(ClusterTestBase):
|
||||||
oids = [storage_object.oid for storage_object in storage_objects[:2]]
|
oids = [storage_object.oid for storage_object in storage_objects[:2]]
|
||||||
file_size = storage_objects[0].size
|
file_size = storage_objects[0].size
|
||||||
|
|
||||||
assert (
|
assert RANGE_MIN_LEN < file_size, f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
|
||||||
RANGE_MIN_LEN < file_size
|
|
||||||
), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
|
|
||||||
|
|
||||||
file_ranges_to_test: list[tuple(int, int, str)] = [
|
file_ranges_to_test: list[tuple(int, int, str)] = [
|
||||||
# Offset is bigger than the file size, the length is small.
|
# Offset is bigger than the file size, the length is small.
|
||||||
|
@ -530,9 +525,7 @@ class TestObjectApi(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("Put object to container"):
|
with reporter.step("Put object to container"):
|
||||||
container_node = random.choice(container_nodes)
|
container_node = random.choice(container_nodes)
|
||||||
oid = put_object(
|
oid = put_object(default_wallet, file_path, common_container, self.shell, container_node.storage_node.get_rpc_endpoint())
|
||||||
default_wallet, file_path, common_container, self.shell, container_node.storage_node.get_rpc_endpoint()
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step("Get range from container node endpoint"):
|
with reporter.step("Get range from container node endpoint"):
|
||||||
get_range(
|
get_range(
|
||||||
|
@ -567,9 +560,7 @@ class TestObjectApi(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("Put object to container"):
|
with reporter.step("Put object to container"):
|
||||||
container_node = random.choice(container_nodes)
|
container_node = random.choice(container_nodes)
|
||||||
oid = put_object(
|
oid = put_object(default_wallet, file_path, common_container, self.shell, container_node.storage_node.get_rpc_endpoint())
|
||||||
default_wallet, file_path, common_container, self.shell, container_node.storage_node.get_rpc_endpoint()
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step("Get range hash from container node endpoint"):
|
with reporter.step("Get range hash from container node endpoint"):
|
||||||
get_range_hash(
|
get_range_hash(
|
||||||
|
@ -595,6 +586,4 @@ class TestObjectApi(ClusterTestBase):
|
||||||
def check_header_is_presented(self, head_info: dict, object_header: dict) -> None:
|
def check_header_is_presented(self, head_info: dict, object_header: dict) -> None:
|
||||||
for key_to_check, val_to_check in object_header.items():
|
for key_to_check, val_to_check in object_header.items():
|
||||||
assert key_to_check in head_info["header"]["attributes"], f"Key {key_to_check} is found in {head_object}"
|
assert key_to_check in head_info["header"]["attributes"], f"Key {key_to_check} is found in {head_object}"
|
||||||
assert head_info["header"]["attributes"].get(key_to_check) == str(
|
assert head_info["header"]["attributes"].get(key_to_check) == str(val_to_check), f"Value {val_to_check} is equal"
|
||||||
val_to_check
|
|
||||||
), f"Value {val_to_check} is equal"
|
|
||||||
|
|
|
@ -2,14 +2,12 @@ import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.cli import FrostfsCli
|
from frostfs_testlib.cli import FrostfsCli
|
||||||
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
|
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.steps.cli.container import (
|
from frostfs_testlib.steps.cli.container import (
|
||||||
REP_2_FOR_3_NODES_PLACEMENT_RULE,
|
REP_2_FOR_3_NODES_PLACEMENT_RULE,
|
||||||
SINGLE_PLACEMENT_RULE,
|
SINGLE_PLACEMENT_RULE,
|
||||||
StorageContainer,
|
StorageContainer,
|
||||||
StorageContainerInfo,
|
StorageContainerInfo,
|
||||||
create_container,
|
|
||||||
)
|
)
|
||||||
from frostfs_testlib.steps.cli.object import delete_object, get_object
|
from frostfs_testlib.steps.cli.object import delete_object, get_object
|
||||||
from frostfs_testlib.steps.storage_object import StorageObjectInfo
|
from frostfs_testlib.steps.storage_object import StorageObjectInfo
|
||||||
|
@ -21,15 +19,20 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.testing.test_control import expect_not_raises
|
from frostfs_testlib.testing.test_control import expect_not_raises
|
||||||
from pytest import FixtureRequest
|
from pytest import FixtureRequest
|
||||||
|
|
||||||
from pytest_tests.helpers.bearer_token import create_bearer_token
|
from ...helpers.bearer_token import create_bearer_token
|
||||||
from pytest_tests.helpers.container_access import assert_full_access_to_container
|
from ...helpers.container_access import assert_full_access_to_container
|
||||||
|
from ...helpers.container_creation import create_container_with_ape
|
||||||
|
from ...helpers.container_request import ContainerRequest
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
@pytest.fixture(scope="session")
|
||||||
@allure.title("Create user container for bearer token usage")
|
@allure.title("Create user container for bearer token usage")
|
||||||
def user_container(default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, request: FixtureRequest) -> StorageContainer:
|
def user_container(
|
||||||
rule = request.param if "param" in request.__dict__ else SINGLE_PLACEMENT_RULE
|
frostfs_cli: FrostfsCli, default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, rpc_endpoint: str, request: FixtureRequest
|
||||||
container_id = create_container(default_wallet, client_shell, cluster.default_rpc_endpoint, rule, PUBLIC_ACL)
|
) -> StorageContainer:
|
||||||
|
policy = request.param if "param" in request.__dict__ else SINGLE_PLACEMENT_RULE
|
||||||
|
container_request = ContainerRequest(policy)
|
||||||
|
container_id = create_container_with_ape(container_request, frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint)
|
||||||
|
|
||||||
# Deliberately using s3gate wallet here to test bearer token
|
# Deliberately using s3gate wallet here to test bearer token
|
||||||
s3_gate_wallet = WalletInfo.from_node(cluster.s3_gates[0])
|
s3_gate_wallet = WalletInfo.from_node(cluster.s3_gates[0])
|
||||||
|
@ -62,9 +65,10 @@ def storage_objects(
|
||||||
return storage_objects
|
return storage_objects
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.smoke
|
@pytest.mark.nightly
|
||||||
@pytest.mark.bearer
|
@pytest.mark.bearer
|
||||||
@pytest.mark.ape
|
@pytest.mark.ape
|
||||||
|
@pytest.mark.grpc_api
|
||||||
class TestObjectApiWithBearerToken(ClusterTestBase):
|
class TestObjectApiWithBearerToken(ClusterTestBase):
|
||||||
@allure.title("Object can be deleted from any node using s3gate wallet with bearer token (obj_size={object_size})")
|
@allure.title("Object can be deleted from any node using s3gate wallet with bearer token (obj_size={object_size})")
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
|
|
|
@ -4,52 +4,47 @@ import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
|
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
|
||||||
from frostfs_testlib.steps.cli.container import create_container
|
|
||||||
from frostfs_testlib.steps.cli.object import get_object_from_random_node, head_object, put_object_to_random_node
|
from frostfs_testlib.steps.cli.object import get_object_from_random_node, head_object, put_object_to_random_node
|
||||||
from frostfs_testlib.steps.epoch import get_epoch
|
from frostfs_testlib.steps.epoch import get_epoch
|
||||||
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
|
from frostfs_testlib.testing.test_control import expect_not_raises
|
||||||
|
from frostfs_testlib.utils.file_utils import TestFile
|
||||||
|
|
||||||
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
|
from ...helpers.utility import wait_for_gc_pass_on_storage_nodes
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.sanity
|
@pytest.mark.sanity
|
||||||
@pytest.mark.grpc_api
|
@pytest.mark.grpc_api
|
||||||
class TestObjectApiLifetime(ClusterTestBase):
|
class TestObjectApiLifetime(ClusterTestBase):
|
||||||
@allure.title("Object is removed when lifetime expired (obj_size={object_size})")
|
@allure.title("Object is removed when lifetime expired (obj_size={object_size})")
|
||||||
def test_object_api_lifetime(self, default_wallet: WalletInfo, object_size: ObjectSize):
|
def test_object_api_lifetime(self, container: str, test_file: TestFile, default_wallet: WalletInfo):
|
||||||
"""
|
"""
|
||||||
Test object deleted after expiration epoch.
|
Test object deleted after expiration epoch.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
wallet = default_wallet
|
wallet = default_wallet
|
||||||
endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
cid = create_container(wallet, self.shell, endpoint)
|
|
||||||
|
|
||||||
file_path = generate_file(object_size.value)
|
|
||||||
file_hash = get_file_hash(file_path)
|
|
||||||
epoch = get_epoch(self.shell, self.cluster)
|
epoch = get_epoch(self.shell, self.cluster)
|
||||||
|
|
||||||
oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster, expire_at=epoch + 1)
|
oid = put_object_to_random_node(wallet, test_file.path, container, self.shell, self.cluster, expire_at=epoch + 1)
|
||||||
got_file = get_object_from_random_node(wallet, cid, oid, self.shell, self.cluster)
|
with expect_not_raises():
|
||||||
assert get_file_hash(got_file) == file_hash
|
head_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
|
||||||
|
|
||||||
with reporter.step("Tick two epochs"):
|
with reporter.step("Tick two epochs"):
|
||||||
for _ in range(2):
|
self.tick_epochs(2)
|
||||||
self.tick_epoch()
|
|
||||||
|
|
||||||
# Wait for GC, because object with expiration is counted as alive until GC removes it
|
# Wait for GC, because object with expiration is counted as alive until GC removes it
|
||||||
wait_for_gc_pass_on_storage_nodes()
|
wait_for_gc_pass_on_storage_nodes()
|
||||||
|
|
||||||
with reporter.step("Check object deleted because it expires on epoch"):
|
with reporter.step("Check object deleted because it expires on epoch"):
|
||||||
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
||||||
head_object(wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
|
head_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
|
||||||
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
||||||
get_object_from_random_node(wallet, cid, oid, self.shell, self.cluster)
|
get_object_from_random_node(wallet, container, oid, self.shell, self.cluster)
|
||||||
|
|
||||||
with reporter.step("Tick additional epoch"):
|
with reporter.step("Tick additional epoch"):
|
||||||
self.tick_epoch()
|
self.tick_epoch()
|
||||||
|
@ -58,6 +53,6 @@ class TestObjectApiLifetime(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("Check object deleted because it expires on previous epoch"):
|
with reporter.step("Check object deleted because it expires on previous epoch"):
|
||||||
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
||||||
head_object(wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
|
head_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
|
||||||
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
||||||
get_object_from_random_node(wallet, cid, oid, self.shell, self.cluster)
|
get_object_from_random_node(wallet, container, oid, self.shell, self.cluster)
|
||||||
|
|
|
@ -1,26 +1,23 @@
|
||||||
import logging
|
import logging
|
||||||
import re
|
|
||||||
from datetime import datetime
|
|
||||||
|
|
||||||
import allure
|
import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.credentials.interfaces import CredentialsProvider, User
|
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
|
||||||
from frostfs_testlib.resources.common import STORAGE_GC_TIME
|
from frostfs_testlib.resources.common import STORAGE_GC_TIME
|
||||||
from frostfs_testlib.resources.error_patterns import (
|
from frostfs_testlib.resources.error_patterns import (
|
||||||
LIFETIME_REQUIRED,
|
LIFETIME_REQUIRED,
|
||||||
LOCK_NON_REGULAR_OBJECT,
|
LOCK_NON_REGULAR_OBJECT,
|
||||||
LOCK_OBJECT_EXPIRATION,
|
LOCK_OBJECT_EXPIRATION,
|
||||||
LOCK_OBJECT_REMOVAL,
|
LOCK_OBJECT_REMOVAL,
|
||||||
OBJECT_ALREADY_REMOVED,
|
|
||||||
OBJECT_IS_LOCKED,
|
OBJECT_IS_LOCKED,
|
||||||
OBJECT_NOT_FOUND,
|
OBJECT_NOT_FOUND,
|
||||||
)
|
)
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo, create_container
|
from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo
|
||||||
from frostfs_testlib.steps.cli.object import delete_object, head_object, lock_object
|
from frostfs_testlib.steps.cli.object import delete_object, head_object, lock_object
|
||||||
from frostfs_testlib.steps.complex_object_actions import get_link_object, get_storage_object_chunks
|
from frostfs_testlib.steps.complex_object_actions import get_link_object, get_storage_object_chunks
|
||||||
from frostfs_testlib.steps.epoch import ensure_fresh_epoch, get_epoch, tick_epoch
|
from frostfs_testlib.steps.epoch import ensure_fresh_epoch
|
||||||
from frostfs_testlib.steps.node_management import drop_object
|
from frostfs_testlib.steps.node_management import drop_object
|
||||||
from frostfs_testlib.steps.storage_object import delete_objects
|
from frostfs_testlib.steps.storage_object import delete_objects
|
||||||
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
|
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
|
||||||
|
@ -32,7 +29,9 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.testing.test_control import expect_not_raises, wait_for_success
|
from frostfs_testlib.testing.test_control import expect_not_raises, wait_for_success
|
||||||
from frostfs_testlib.utils import datetime_utils
|
from frostfs_testlib.utils import datetime_utils
|
||||||
|
|
||||||
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
|
from ...helpers.container_creation import create_container_with_ape
|
||||||
|
from ...helpers.container_request import EVERYONE_ALLOW_ALL
|
||||||
|
from ...helpers.utility import wait_for_gc_pass_on_storage_nodes
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
|
@ -41,25 +40,15 @@ FIXTURE_OBJECT_LIFETIME = 10
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
@pytest.fixture(scope="module")
|
||||||
def user_wallet(credentials_provider: CredentialsProvider, cluster: Cluster) -> WalletInfo:
|
def user_container(
|
||||||
with reporter.step("Create user wallet with container"):
|
frostfs_cli: FrostfsCli, default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, rpc_endpoint: str
|
||||||
user = User(f"user_{hex(int(datetime.now().timestamp() * 1000000))}")
|
) -> StorageContainer:
|
||||||
return credentials_provider.GRPC.provide(user, cluster.cluster_nodes[0])
|
cid = create_container_with_ape(EVERYONE_ALLOW_ALL, frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint)
|
||||||
|
return StorageContainer(StorageContainerInfo(cid, default_wallet), client_shell, cluster)
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
@pytest.fixture(scope="module")
|
||||||
def user_container(user_wallet: WalletInfo, client_shell: Shell, cluster: Cluster):
|
def locked_storage_object(user_container: StorageContainer, client_shell: Shell, cluster: Cluster, object_size: ObjectSize):
|
||||||
container_id = create_container(user_wallet, shell=client_shell, endpoint=cluster.default_rpc_endpoint)
|
|
||||||
return StorageContainer(StorageContainerInfo(container_id, user_wallet), client_shell, cluster)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
|
||||||
def locked_storage_object(
|
|
||||||
user_container: StorageContainer,
|
|
||||||
client_shell: Shell,
|
|
||||||
cluster: Cluster,
|
|
||||||
object_size: ObjectSize,
|
|
||||||
):
|
|
||||||
"""
|
"""
|
||||||
Intention of this fixture is to provide storage object which is NOT expected to be deleted during test act phase
|
Intention of this fixture is to provide storage object which is NOT expected to be deleted during test act phase
|
||||||
"""
|
"""
|
||||||
|
@ -67,9 +56,7 @@ def locked_storage_object(
|
||||||
current_epoch = ensure_fresh_epoch(client_shell, cluster)
|
current_epoch = ensure_fresh_epoch(client_shell, cluster)
|
||||||
expiration_epoch = current_epoch + FIXTURE_LOCK_LIFETIME
|
expiration_epoch = current_epoch + FIXTURE_LOCK_LIFETIME
|
||||||
|
|
||||||
storage_object = user_container.generate_object(
|
storage_object = user_container.generate_object(object_size.value, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME)
|
||||||
object_size.value, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME
|
|
||||||
)
|
|
||||||
lock_object_id = lock_object(
|
lock_object_id = lock_object(
|
||||||
storage_object.wallet,
|
storage_object.wallet,
|
||||||
storage_object.cid,
|
storage_object.cid,
|
||||||
|
@ -78,59 +65,24 @@ def locked_storage_object(
|
||||||
cluster.default_rpc_endpoint,
|
cluster.default_rpc_endpoint,
|
||||||
lifetime=FIXTURE_LOCK_LIFETIME,
|
lifetime=FIXTURE_LOCK_LIFETIME,
|
||||||
)
|
)
|
||||||
storage_object.locks = [
|
storage_object.locks = [LockObjectInfo(storage_object.cid, lock_object_id, FIXTURE_LOCK_LIFETIME, expiration_epoch)]
|
||||||
LockObjectInfo(storage_object.cid, lock_object_id, FIXTURE_LOCK_LIFETIME, expiration_epoch)
|
|
||||||
]
|
|
||||||
|
|
||||||
yield storage_object
|
return storage_object
|
||||||
|
|
||||||
with reporter.step("Delete created locked object"):
|
|
||||||
current_epoch = get_epoch(client_shell, cluster)
|
|
||||||
epoch_diff = expiration_epoch - current_epoch + 1
|
|
||||||
|
|
||||||
if epoch_diff > 0:
|
|
||||||
with reporter.step(f"Tick {epoch_diff} epochs"):
|
|
||||||
for _ in range(epoch_diff):
|
|
||||||
tick_epoch(client_shell, cluster)
|
|
||||||
try:
|
|
||||||
delete_object(
|
|
||||||
storage_object.wallet,
|
|
||||||
storage_object.cid,
|
|
||||||
storage_object.oid,
|
|
||||||
client_shell,
|
|
||||||
cluster.default_rpc_endpoint,
|
|
||||||
)
|
|
||||||
except Exception as ex:
|
|
||||||
ex_message = str(ex)
|
|
||||||
# It's okay if object already removed
|
|
||||||
if not re.search(OBJECT_NOT_FOUND, ex_message) and not re.search(OBJECT_ALREADY_REMOVED, ex_message):
|
|
||||||
raise ex
|
|
||||||
logger.debug(ex_message)
|
|
||||||
|
|
||||||
|
|
||||||
@wait_for_success(datetime_utils.parse_time(STORAGE_GC_TIME))
|
@wait_for_success(datetime_utils.parse_time(STORAGE_GC_TIME))
|
||||||
def check_object_not_found(wallet: WalletInfo, cid: str, oid: str, shell: Shell, rpc_endpoint: str):
|
def check_object_not_found(wallet: WalletInfo, cid: str, oid: str, shell: Shell, rpc_endpoint: str):
|
||||||
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
||||||
head_object(
|
head_object(wallet, cid, oid, shell, rpc_endpoint)
|
||||||
wallet,
|
|
||||||
cid,
|
|
||||||
oid,
|
|
||||||
shell,
|
|
||||||
rpc_endpoint,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def verify_object_available(wallet: WalletInfo, cid: str, oid: str, shell: Shell, rpc_endpoint: str):
|
def verify_object_available(wallet: WalletInfo, cid: str, oid: str, shell: Shell, rpc_endpoint: str):
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
head_object(
|
head_object(wallet, cid, oid, shell, rpc_endpoint)
|
||||||
wallet,
|
|
||||||
cid,
|
|
||||||
oid,
|
|
||||||
shell,
|
|
||||||
rpc_endpoint,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
|
@pytest.mark.grpc_api
|
||||||
@pytest.mark.grpc_object_lock
|
@pytest.mark.grpc_object_lock
|
||||||
class TestObjectLockWithGrpc(ClusterTestBase):
|
class TestObjectLockWithGrpc(ClusterTestBase):
|
||||||
@pytest.fixture()
|
@pytest.fixture()
|
||||||
|
@ -142,9 +94,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
|
||||||
with reporter.step("Creating locked object"):
|
with reporter.step("Creating locked object"):
|
||||||
current_epoch = self.get_epoch()
|
current_epoch = self.get_epoch()
|
||||||
|
|
||||||
storage_object = user_container.generate_object(
|
storage_object = user_container.generate_object(object_size.value, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME)
|
||||||
object_size.value, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME
|
|
||||||
)
|
|
||||||
lock_object(
|
lock_object(
|
||||||
storage_object.wallet,
|
storage_object.wallet,
|
||||||
storage_object.cid,
|
storage_object.cid,
|
||||||
|
@ -220,9 +170,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
|
||||||
1,
|
1,
|
||||||
)
|
)
|
||||||
|
|
||||||
@allure.title(
|
@allure.title("Lock must contain valid lifetime or expire_at field: (lifetime={wrong_lifetime}, expire-at={wrong_expire_at})")
|
||||||
"Lock must contain valid lifetime or expire_at field: (lifetime={wrong_lifetime}, expire-at={wrong_expire_at})"
|
|
||||||
)
|
|
||||||
# We operate with only lock object here so no complex object needed in this test
|
# We operate with only lock object here so no complex object needed in this test
|
||||||
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
|
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
|
@ -675,9 +623,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("Generate two objects"):
|
with reporter.step("Generate two objects"):
|
||||||
for epoch_i in range(2):
|
for epoch_i in range(2):
|
||||||
storage_objects.append(
|
storage_objects.append(user_container.generate_object(object_size.value, expire_at=current_epoch + epoch_i + 3))
|
||||||
user_container.generate_object(object_size.value, expire_at=current_epoch + epoch_i + 3)
|
|
||||||
)
|
|
||||||
|
|
||||||
self.tick_epoch()
|
self.tick_epoch()
|
||||||
|
|
||||||
|
|
62
pytest_tests/testsuites/object/test_object_tombstone.py
Normal file
62
pytest_tests/testsuites/object/test_object_tombstone.py
Normal file
|
@ -0,0 +1,62 @@
|
||||||
|
import allure
|
||||||
|
import pytest
|
||||||
|
from frostfs_testlib import reporter
|
||||||
|
from frostfs_testlib.resources.common import EXPIRATION_EPOCH_ATTRIBUTE
|
||||||
|
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
|
||||||
|
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
|
||||||
|
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
|
||||||
|
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
|
||||||
|
from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
|
||||||
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
|
from frostfs_testlib.utils.file_utils import TestFile
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
|
@pytest.mark.grpc_api
|
||||||
|
class TestObjectTombstone(ClusterTestBase):
|
||||||
|
@pytest.fixture()
|
||||||
|
@allure.title("Change tombstone lifetime")
|
||||||
|
def tombstone_lifetime(self, cluster_state_controller: ClusterStateController, request: pytest.FixtureRequest):
|
||||||
|
config_manager = cluster_state_controller.manager(ConfigStateManager)
|
||||||
|
config_manager.set_on_all_nodes(StorageNode, {"object:delete:tombstone_lifetime": request.param}, True)
|
||||||
|
|
||||||
|
yield f"Tombstone lifetime was changed to {request.param}"
|
||||||
|
|
||||||
|
config_manager.revert_all(True)
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("object_size, tombstone_lifetime", [("simple", 2)], indirect=True)
|
||||||
|
@allure.title("Tombstone object should be removed after expiration")
|
||||||
|
def test_tombstone_lifetime(
|
||||||
|
self,
|
||||||
|
new_epoch: int,
|
||||||
|
container: str,
|
||||||
|
grpc_client: GrpcClientWrapper,
|
||||||
|
test_file: TestFile,
|
||||||
|
rpc_endpoint: str,
|
||||||
|
tombstone_lifetime: str,
|
||||||
|
):
|
||||||
|
allure.dynamic.description(tombstone_lifetime)
|
||||||
|
|
||||||
|
with reporter.step("Put object"):
|
||||||
|
oid = grpc_client.object.put(test_file.path, container, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step("Remove object"):
|
||||||
|
tombstone_oid = grpc_client.object.delete(container, oid, rpc_endpoint)
|
||||||
|
|
||||||
|
with reporter.step("Get tombstone object lifetime"):
|
||||||
|
tombstone_info = grpc_client.object.head(container, tombstone_oid, rpc_endpoint)
|
||||||
|
tombstone_expiration_epoch = tombstone_info["header"]["attributes"][EXPIRATION_EPOCH_ATTRIBUTE]
|
||||||
|
|
||||||
|
with reporter.step("Tombstone lifetime should be <= 3"):
|
||||||
|
epochs_to_skip = int(tombstone_expiration_epoch) - new_epoch + 1
|
||||||
|
assert epochs_to_skip <= 3
|
||||||
|
|
||||||
|
with reporter.step("Wait for tombstone expiration"):
|
||||||
|
self.tick_epochs(epochs_to_skip)
|
||||||
|
|
||||||
|
with reporter.step("Tombstone should be removed after expiration"):
|
||||||
|
with pytest.raises(RuntimeError, match=OBJECT_NOT_FOUND):
|
||||||
|
grpc_client.object.head(container, tombstone_oid, rpc_endpoint)
|
||||||
|
|
||||||
|
with pytest.raises(RuntimeError, match=OBJECT_NOT_FOUND):
|
||||||
|
grpc_client.object.get(container, tombstone_oid, rpc_endpoint)
|
|
@ -7,9 +7,7 @@ from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.cli import FrostfsCli
|
from frostfs_testlib.cli import FrostfsCli
|
||||||
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
|
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
|
||||||
from frostfs_testlib.resources.error_patterns import OBJECT_IS_LOCKED
|
from frostfs_testlib.resources.error_patterns import OBJECT_IS_LOCKED
|
||||||
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL_F
|
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.steps.cli.container import create_container
|
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.testing.test_control import expect_not_raises
|
from frostfs_testlib.testing.test_control import expect_not_raises
|
||||||
|
@ -18,6 +16,8 @@ from frostfs_testlib.utils.file_utils import TestFile, get_file_hash
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
|
@pytest.mark.grpc_api
|
||||||
@pytest.mark.grpc_without_user
|
@pytest.mark.grpc_without_user
|
||||||
class TestObjectApiWithoutUser(ClusterTestBase):
|
class TestObjectApiWithoutUser(ClusterTestBase):
|
||||||
def _parse_oid(self, stdout: str) -> str:
|
def _parse_oid(self, stdout: str) -> str:
|
||||||
|
@ -30,96 +30,72 @@ class TestObjectApiWithoutUser(ClusterTestBase):
|
||||||
tombstone = id_str.split(":")[1]
|
tombstone = id_str.split(":")[1]
|
||||||
return tombstone.strip()
|
return tombstone.strip()
|
||||||
|
|
||||||
@pytest.fixture(scope="function")
|
|
||||||
def public_container(self, default_wallet: WalletInfo) -> str:
|
|
||||||
with reporter.step("Create public container"):
|
|
||||||
cid_public = create_container(
|
|
||||||
default_wallet,
|
|
||||||
self.shell,
|
|
||||||
self.cluster.default_rpc_endpoint,
|
|
||||||
basic_acl=PUBLIC_ACL_F,
|
|
||||||
)
|
|
||||||
|
|
||||||
return cid_public
|
|
||||||
|
|
||||||
@pytest.fixture(scope="class")
|
@pytest.fixture(scope="class")
|
||||||
def frostfs_cli(self, client_shell: Shell) -> FrostfsCli:
|
def cli_without_wallet(self, client_shell: Shell) -> FrostfsCli:
|
||||||
return FrostfsCli(client_shell, FROSTFS_CLI_EXEC)
|
return FrostfsCli(client_shell, FROSTFS_CLI_EXEC)
|
||||||
|
|
||||||
@allure.title("Get public container by native API with generate private key")
|
@allure.title("Get public container by native API with generate private key")
|
||||||
def test_get_container_with_generated_key(self, frostfs_cli: FrostfsCli, public_container: str):
|
def test_get_container_with_generated_key(self, cli_without_wallet: FrostfsCli, container: str, rpc_endpoint: str):
|
||||||
"""
|
"""
|
||||||
Validate `container get` native API with flag `--generate-key`.
|
Validate `container get` native API with flag `--generate-key`.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cid = public_container
|
|
||||||
rpc_endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
with reporter.step("Get container with generate key"):
|
with reporter.step("Get container with generate key"):
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
frostfs_cli.container.get(rpc_endpoint, cid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
|
cli_without_wallet.container.get(rpc_endpoint, container, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
|
||||||
|
|
||||||
@allure.title("Get list containers by native API with generate private key")
|
@allure.title("Get list containers by native API with generate private key")
|
||||||
def test_list_containers_with_generated_key(self, frostfs_cli: FrostfsCli, default_wallet: WalletInfo, public_container: str):
|
def test_list_containers_with_generated_key(
|
||||||
|
self, cli_without_wallet: FrostfsCli, default_wallet: WalletInfo, container: str, rpc_endpoint: str
|
||||||
|
):
|
||||||
"""
|
"""
|
||||||
Validate `container list` native API with flag `--generate-key`.
|
Validate `container list` native API with flag `--generate-key`.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
rpc_endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
owner = default_wallet.get_address_from_json(0)
|
owner = default_wallet.get_address_from_json(0)
|
||||||
|
|
||||||
with reporter.step("List containers with generate key"):
|
with reporter.step("List containers with generate key"):
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
result = frostfs_cli.container.list(rpc_endpoint, owner=owner, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
|
result = cli_without_wallet.container.list(rpc_endpoint, owner=owner, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
|
||||||
|
|
||||||
with reporter.step("Expect container in received containers list"):
|
with reporter.step("Expect container in received containers list"):
|
||||||
containers = result.stdout.split()
|
containers = result.stdout.split()
|
||||||
assert public_container in containers
|
assert container in containers
|
||||||
|
|
||||||
@allure.title("Get list of public container objects by native API with generate private key")
|
@allure.title("Get list of public container objects by native API with generate private key")
|
||||||
def test_list_objects_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str):
|
def test_list_objects_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, rpc_endpoint: str):
|
||||||
"""
|
"""
|
||||||
Validate `container list_objects` native API with flag `--generate-key`.
|
Validate `container list_objects` native API with flag `--generate-key`.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cid = public_container
|
|
||||||
rpc_endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
with reporter.step("List objects with generate key"):
|
with reporter.step("List objects with generate key"):
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
result = frostfs_cli.container.list_objects(rpc_endpoint, cid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
|
result = cli_without_wallet.container.list_objects(rpc_endpoint, container, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
|
||||||
|
|
||||||
with reporter.step("Expect empty objects list"):
|
with reporter.step("Expect empty objects list"):
|
||||||
objects = result.stdout.split()
|
objects = result.stdout.split()
|
||||||
assert len(objects) == 0, objects
|
assert len(objects) == 0, objects
|
||||||
|
|
||||||
@allure.title("Search public container nodes by native API with generate private key")
|
@allure.title("Search public container nodes by native API with generate private key")
|
||||||
def test_search_nodes_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str):
|
def test_search_nodes_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, rpc_endpoint: str):
|
||||||
"""
|
"""
|
||||||
Validate `container search_node` native API with flag `--generate-key`.
|
Validate `container search_node` native API with flag `--generate-key`.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cid = public_container
|
|
||||||
rpc_endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
with reporter.step("Search nodes with generate key"):
|
with reporter.step("Search nodes with generate key"):
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
frostfs_cli.container.search_node(rpc_endpoint, cid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
|
cli_without_wallet.container.search_node(rpc_endpoint, container, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
|
||||||
|
|
||||||
@allure.title("Put object into public container by native API with generate private key (obj_size={object_size})")
|
@allure.title("Put object into public container by native API with generate private key (obj_size={object_size})")
|
||||||
def test_put_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
|
def test_put_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
|
||||||
"""
|
"""
|
||||||
Validate `object put` into container with public ACL and flag `--generate-key`.
|
Validate `object put` into container with public ACL and flag `--generate-key`.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cid = public_container
|
|
||||||
rpc_endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
with reporter.step("Put object with generate key"):
|
with reporter.step("Put object with generate key"):
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
result = frostfs_cli.object.put(
|
result = cli_without_wallet.object.put(
|
||||||
rpc_endpoint,
|
rpc_endpoint,
|
||||||
cid,
|
container,
|
||||||
file_path,
|
file_path,
|
||||||
generate_key=True,
|
generate_key=True,
|
||||||
no_progress=True,
|
no_progress=True,
|
||||||
|
@ -129,26 +105,24 @@ class TestObjectApiWithoutUser(ClusterTestBase):
|
||||||
oid = self._parse_oid(result.stdout)
|
oid = self._parse_oid(result.stdout)
|
||||||
|
|
||||||
with reporter.step("List objects with generate key"):
|
with reporter.step("List objects with generate key"):
|
||||||
result = frostfs_cli.container.list_objects(rpc_endpoint, cid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
|
result = cli_without_wallet.container.list_objects(rpc_endpoint, container, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
|
||||||
|
|
||||||
with reporter.step("Expect object in received objects list"):
|
with reporter.step("Expect object in received objects list"):
|
||||||
objects = result.stdout.split()
|
objects = result.stdout.split()
|
||||||
assert oid in objects, objects
|
assert oid in objects, objects
|
||||||
|
|
||||||
@allure.title("Get public container object by native API with generate private key (obj_size={object_size})")
|
@allure.title("Get public container object by native API with generate private key (obj_size={object_size})")
|
||||||
def test_get_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
|
def test_get_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
|
||||||
"""
|
"""
|
||||||
Validate `object get` for container with public ACL and flag `--generate-key`.
|
Validate `object get` for container with public ACL and flag `--generate-key`.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cid = public_container
|
|
||||||
rpc_endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
expected_hash = get_file_hash(file_path)
|
expected_hash = get_file_hash(file_path)
|
||||||
|
|
||||||
with reporter.step("Put object with generate key"):
|
with reporter.step("Put object with generate key"):
|
||||||
result = frostfs_cli.object.put(
|
result = cli_without_wallet.object.put(
|
||||||
rpc_endpoint,
|
rpc_endpoint,
|
||||||
cid,
|
container,
|
||||||
file_path,
|
file_path,
|
||||||
generate_key=True,
|
generate_key=True,
|
||||||
no_progress=True,
|
no_progress=True,
|
||||||
|
@ -159,9 +133,9 @@ class TestObjectApiWithoutUser(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("Get object with generate key"):
|
with reporter.step("Get object with generate key"):
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
frostfs_cli.object.get(
|
cli_without_wallet.object.get(
|
||||||
rpc_endpoint,
|
rpc_endpoint,
|
||||||
cid,
|
container,
|
||||||
oid,
|
oid,
|
||||||
file=file_path,
|
file=file_path,
|
||||||
generate_key=True,
|
generate_key=True,
|
||||||
|
@ -175,18 +149,15 @@ class TestObjectApiWithoutUser(ClusterTestBase):
|
||||||
assert expected_hash == downloaded_hash
|
assert expected_hash == downloaded_hash
|
||||||
|
|
||||||
@allure.title("Head public container object by native API with generate private key (obj_size={object_size})")
|
@allure.title("Head public container object by native API with generate private key (obj_size={object_size})")
|
||||||
def test_head_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
|
def test_head_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
|
||||||
"""
|
"""
|
||||||
Validate `object head` for container with public ACL and flag `--generate-key`.
|
Validate `object head` for container with public ACL and flag `--generate-key`.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cid = public_container
|
|
||||||
rpc_endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
with reporter.step("Put object with generate key"):
|
with reporter.step("Put object with generate key"):
|
||||||
result = frostfs_cli.object.put(
|
result = cli_without_wallet.object.put(
|
||||||
rpc_endpoint,
|
rpc_endpoint,
|
||||||
cid,
|
container,
|
||||||
file_path,
|
file_path,
|
||||||
generate_key=True,
|
generate_key=True,
|
||||||
no_progress=True,
|
no_progress=True,
|
||||||
|
@ -197,21 +168,18 @@ class TestObjectApiWithoutUser(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("Head object with generate key"):
|
with reporter.step("Head object with generate key"):
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
frostfs_cli.object.head(rpc_endpoint, cid, oid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
|
cli_without_wallet.object.head(rpc_endpoint, container, oid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
|
||||||
|
|
||||||
@allure.title("Delete public container object by native API with generate private key (obj_size={object_size})")
|
@allure.title("Delete public container object by native API with generate private key (obj_size={object_size})")
|
||||||
def test_delete_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
|
def test_delete_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
|
||||||
"""
|
"""
|
||||||
Validate `object delete` for container with public ACL and flag `--generate key`.
|
Validate `object delete` for container with public ACL and flag `--generate key`.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cid = public_container
|
|
||||||
rpc_endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
with reporter.step("Put object with generate key"):
|
with reporter.step("Put object with generate key"):
|
||||||
result = frostfs_cli.object.put(
|
result = cli_without_wallet.object.put(
|
||||||
rpc_endpoint,
|
rpc_endpoint,
|
||||||
cid,
|
container,
|
||||||
file_path,
|
file_path,
|
||||||
generate_key=True,
|
generate_key=True,
|
||||||
no_progress=True,
|
no_progress=True,
|
||||||
|
@ -222,14 +190,14 @@ class TestObjectApiWithoutUser(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("Delete object with generate key"):
|
with reporter.step("Delete object with generate key"):
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
result = frostfs_cli.object.delete(rpc_endpoint, cid, oid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
|
result = cli_without_wallet.object.delete(rpc_endpoint, container, oid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
|
||||||
|
|
||||||
oid = self._parse_tombstone_oid(result.stdout)
|
oid = self._parse_tombstone_oid(result.stdout)
|
||||||
|
|
||||||
with reporter.step("Head object with generate key"):
|
with reporter.step("Head object with generate key"):
|
||||||
result = frostfs_cli.object.head(
|
result = cli_without_wallet.object.head(
|
||||||
rpc_endpoint,
|
rpc_endpoint,
|
||||||
cid,
|
container,
|
||||||
oid,
|
oid,
|
||||||
generate_key=True,
|
generate_key=True,
|
||||||
timeout=CLI_DEFAULT_TIMEOUT,
|
timeout=CLI_DEFAULT_TIMEOUT,
|
||||||
|
@ -240,19 +208,16 @@ class TestObjectApiWithoutUser(ClusterTestBase):
|
||||||
assert object_type == "TOMBSTONE", object_type
|
assert object_type == "TOMBSTONE", object_type
|
||||||
|
|
||||||
@allure.title("Lock public container object by native API with generate private key (obj_size={object_size})")
|
@allure.title("Lock public container object by native API with generate private key (obj_size={object_size})")
|
||||||
def test_lock_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
|
def test_lock_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
|
||||||
"""
|
"""
|
||||||
Validate `object lock` for container with public ACL and flag `--generate-key`.
|
Validate `object lock` for container with public ACL and flag `--generate-key`.
|
||||||
Attempt to delete the locked object.
|
Attempt to delete the locked object.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cid = public_container
|
|
||||||
rpc_endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
with reporter.step("Put object with generate key"):
|
with reporter.step("Put object with generate key"):
|
||||||
result = frostfs_cli.object.put(
|
result = cli_without_wallet.object.put(
|
||||||
rpc_endpoint,
|
rpc_endpoint,
|
||||||
cid,
|
container,
|
||||||
file_path,
|
file_path,
|
||||||
generate_key=True,
|
generate_key=True,
|
||||||
no_progress=True,
|
no_progress=True,
|
||||||
|
@ -263,9 +228,9 @@ class TestObjectApiWithoutUser(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("Lock object with generate key"):
|
with reporter.step("Lock object with generate key"):
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
frostfs_cli.object.lock(
|
cli_without_wallet.object.lock(
|
||||||
rpc_endpoint,
|
rpc_endpoint,
|
||||||
cid,
|
container,
|
||||||
oid,
|
oid,
|
||||||
generate_key=True,
|
generate_key=True,
|
||||||
timeout=CLI_DEFAULT_TIMEOUT,
|
timeout=CLI_DEFAULT_TIMEOUT,
|
||||||
|
@ -274,27 +239,24 @@ class TestObjectApiWithoutUser(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("Delete locked object with generate key and expect error"):
|
with reporter.step("Delete locked object with generate key and expect error"):
|
||||||
with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
|
with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
|
||||||
frostfs_cli.object.delete(
|
cli_without_wallet.object.delete(
|
||||||
rpc_endpoint,
|
rpc_endpoint,
|
||||||
cid,
|
container,
|
||||||
oid,
|
oid,
|
||||||
generate_key=True,
|
generate_key=True,
|
||||||
timeout=CLI_DEFAULT_TIMEOUT,
|
timeout=CLI_DEFAULT_TIMEOUT,
|
||||||
)
|
)
|
||||||
|
|
||||||
@allure.title("Search public container objects by native API with generate private key (obj_size={object_size})")
|
@allure.title("Search public container objects by native API with generate private key (obj_size={object_size})")
|
||||||
def test_search_object_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
|
def test_search_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
|
||||||
"""
|
"""
|
||||||
Validate `object search` for container with public ACL and flag `--generate-key`.
|
Validate `object search` for container with public ACL and flag `--generate-key`.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cid = public_container
|
|
||||||
rpc_endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
with reporter.step("Put object with generate key"):
|
with reporter.step("Put object with generate key"):
|
||||||
result = frostfs_cli.object.put(
|
result = cli_without_wallet.object.put(
|
||||||
rpc_endpoint,
|
rpc_endpoint,
|
||||||
cid,
|
container,
|
||||||
file_path,
|
file_path,
|
||||||
generate_key=True,
|
generate_key=True,
|
||||||
no_progress=True,
|
no_progress=True,
|
||||||
|
@ -305,25 +267,22 @@ class TestObjectApiWithoutUser(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("Object search with generate key"):
|
with reporter.step("Object search with generate key"):
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
result = frostfs_cli.object.search(rpc_endpoint, cid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
|
result = cli_without_wallet.object.search(rpc_endpoint, container, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
|
||||||
|
|
||||||
with reporter.step("Expect object in received objects list of container"):
|
with reporter.step("Expect object in received objects list of container"):
|
||||||
object_ids = re.findall(r"(\w{43,44})", result.stdout)
|
object_ids = re.findall(r"(\w{43,44})", result.stdout)
|
||||||
assert oid in object_ids
|
assert oid in object_ids
|
||||||
|
|
||||||
@allure.title("Get range of public container object by native API with generate private key (obj_size={object_size})")
|
@allure.title("Get range of public container object by native API with generate private key (obj_size={object_size})")
|
||||||
def test_range_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
|
def test_range_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
|
||||||
"""
|
"""
|
||||||
Validate `object range` for container with public ACL and `--generate-key`.
|
Validate `object range` for container with public ACL and `--generate-key`.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cid = public_container
|
|
||||||
rpc_endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
with reporter.step("Put object with generate key"):
|
with reporter.step("Put object with generate key"):
|
||||||
result = frostfs_cli.object.put(
|
result = cli_without_wallet.object.put(
|
||||||
rpc_endpoint,
|
rpc_endpoint,
|
||||||
cid,
|
container,
|
||||||
file_path,
|
file_path,
|
||||||
generate_key=True,
|
generate_key=True,
|
||||||
no_progress=True,
|
no_progress=True,
|
||||||
|
@ -334,9 +293,9 @@ class TestObjectApiWithoutUser(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("Get range of object with generate key"):
|
with reporter.step("Get range of object with generate key"):
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
frostfs_cli.object.range(
|
cli_without_wallet.object.range(
|
||||||
rpc_endpoint,
|
rpc_endpoint,
|
||||||
cid,
|
container,
|
||||||
oid,
|
oid,
|
||||||
"0:10",
|
"0:10",
|
||||||
file=file_path,
|
file=file_path,
|
||||||
|
@ -345,18 +304,15 @@ class TestObjectApiWithoutUser(ClusterTestBase):
|
||||||
)
|
)
|
||||||
|
|
||||||
@allure.title("Get hash of public container object by native API with generate private key (obj_size={object_size})")
|
@allure.title("Get hash of public container object by native API with generate private key (obj_size={object_size})")
|
||||||
def test_hash_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
|
def test_hash_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
|
||||||
"""
|
"""
|
||||||
Validate `object hash` for container with public ACL and `--generate-key`.
|
Validate `object hash` for container with public ACL and `--generate-key`.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cid = public_container
|
|
||||||
rpc_endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
with reporter.step("Put object with generate key"):
|
with reporter.step("Put object with generate key"):
|
||||||
result = frostfs_cli.object.put(
|
result = cli_without_wallet.object.put(
|
||||||
rpc_endpoint,
|
rpc_endpoint,
|
||||||
cid,
|
container,
|
||||||
file_path,
|
file_path,
|
||||||
generate_key=True,
|
generate_key=True,
|
||||||
no_progress=True,
|
no_progress=True,
|
||||||
|
@ -367,9 +323,9 @@ class TestObjectApiWithoutUser(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("Get range hash of object with generate key"):
|
with reporter.step("Get range hash of object with generate key"):
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
frostfs_cli.object.hash(
|
cli_without_wallet.object.hash(
|
||||||
rpc_endpoint,
|
rpc_endpoint,
|
||||||
cid,
|
container,
|
||||||
oid,
|
oid,
|
||||||
range="0:10",
|
range="0:10",
|
||||||
generate_key=True,
|
generate_key=True,
|
||||||
|
@ -377,18 +333,15 @@ class TestObjectApiWithoutUser(ClusterTestBase):
|
||||||
)
|
)
|
||||||
|
|
||||||
@allure.title("Get public container object nodes by native API with generate private key (obj_size={object_size})")
|
@allure.title("Get public container object nodes by native API with generate private key (obj_size={object_size})")
|
||||||
def test_nodes_with_generate_key(self, frostfs_cli: FrostfsCli, public_container: str, file_path: TestFile):
|
def test_nodes_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, file_path: TestFile, rpc_endpoint: str):
|
||||||
"""
|
"""
|
||||||
Validate `object nodes` for container with public ACL and `--generate-key`.
|
Validate `object nodes` for container with public ACL and `--generate-key`.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cid = public_container
|
|
||||||
rpc_endpoint = self.cluster.default_rpc_endpoint
|
|
||||||
|
|
||||||
with reporter.step("Put object with generate key"):
|
with reporter.step("Put object with generate key"):
|
||||||
result = frostfs_cli.object.put(
|
result = cli_without_wallet.object.put(
|
||||||
rpc_endpoint,
|
rpc_endpoint,
|
||||||
cid,
|
container,
|
||||||
file_path,
|
file_path,
|
||||||
no_progress=True,
|
no_progress=True,
|
||||||
generate_key=True,
|
generate_key=True,
|
||||||
|
@ -400,14 +353,14 @@ class TestObjectApiWithoutUser(ClusterTestBase):
|
||||||
with reporter.step("Configure frostfs-cli for alive remote node"):
|
with reporter.step("Configure frostfs-cli for alive remote node"):
|
||||||
alive_node = self.cluster.cluster_nodes[0]
|
alive_node = self.cluster.cluster_nodes[0]
|
||||||
node_shell = alive_node.host.get_shell()
|
node_shell = alive_node.host.get_shell()
|
||||||
rpc_endpoint = alive_node.storage_node.get_rpc_endpoint()
|
alive_endpoint = alive_node.storage_node.get_rpc_endpoint()
|
||||||
node_frostfs_cli = FrostfsCli(node_shell, FROSTFS_CLI_EXEC)
|
node_frostfs_cli_wo_wallet = FrostfsCli(node_shell, FROSTFS_CLI_EXEC)
|
||||||
|
|
||||||
with reporter.step("Get object nodes with generate key"):
|
with reporter.step("Get object nodes with generate key"):
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
node_frostfs_cli.object.nodes(
|
node_frostfs_cli_wo_wallet.object.nodes(
|
||||||
rpc_endpoint,
|
alive_endpoint,
|
||||||
cid,
|
container,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
generate_key=True,
|
generate_key=True,
|
||||||
timeout=CLI_DEFAULT_TIMEOUT,
|
timeout=CLI_DEFAULT_TIMEOUT,
|
||||||
|
|
0
pytest_tests/testsuites/replication/__init__.py
Normal file
0
pytest_tests/testsuites/replication/__init__.py
Normal file
File diff suppressed because it is too large
Load diff
0
pytest_tests/testsuites/services/__init__.py
Normal file
0
pytest_tests/testsuites/services/__init__.py
Normal file
0
pytest_tests/testsuites/services/http_gate/__init__.py
Normal file
0
pytest_tests/testsuites/services/http_gate/__init__.py
Normal file
|
@ -4,9 +4,7 @@ import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
|
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
|
||||||
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
|
|
||||||
from frostfs_testlib.steps.acl import bearer_token_base64_from_file
|
from frostfs_testlib.steps.acl import bearer_token_base64_from_file
|
||||||
from frostfs_testlib.steps.cli.container import create_container
|
|
||||||
from frostfs_testlib.steps.http.http_gate import upload_via_http_gate_curl, verify_object_hash
|
from frostfs_testlib.steps.http.http_gate import upload_via_http_gate_curl, verify_object_hash
|
||||||
from frostfs_testlib.storage.cluster import Cluster
|
from frostfs_testlib.storage.cluster import Cluster
|
||||||
from frostfs_testlib.storage.dataclasses import ape
|
from frostfs_testlib.storage.dataclasses import ape
|
||||||
|
@ -15,7 +13,8 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.utils.file_utils import generate_file
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
from pytest_tests.helpers.bearer_token import create_bearer_token
|
from ....helpers.bearer_token import create_bearer_token
|
||||||
|
from ....helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest, requires_container
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
|
@ -24,55 +23,43 @@ logger = logging.getLogger("NeoLogger")
|
||||||
@pytest.mark.http_put
|
@pytest.mark.http_put
|
||||||
class Test_http_bearer(ClusterTestBase):
|
class Test_http_bearer(ClusterTestBase):
|
||||||
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
|
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
|
||||||
|
OWNER_ROLE = ape.Condition.by_role(ape.Role.OWNER)
|
||||||
|
CUSTOM_APE_RULE = ape.Rule(ape.Verb.DENY, ape.ObjectOperations.PUT, OWNER_ROLE)
|
||||||
|
|
||||||
@pytest.fixture(scope="class")
|
@pytest.fixture()
|
||||||
def user_container(self, frostfs_cli: FrostfsCli, default_wallet: WalletInfo, cluster: Cluster) -> str:
|
def bearer_token(self, frostfs_cli: FrostfsCli, container: str, temp_directory: str, cluster: Cluster) -> str:
|
||||||
with reporter.step("Create container"):
|
|
||||||
cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, self.PLACEMENT_RULE, PUBLIC_ACL)
|
|
||||||
|
|
||||||
with reporter.step("Deny PUT via APE rule to container"):
|
|
||||||
role_condition = ape.Condition.by_role(ape.Role.OWNER)
|
|
||||||
rule = ape.Rule(ape.Verb.DENY, ape.ObjectOperations.PUT, role_condition)
|
|
||||||
frostfs_cli.ape_manager.add(
|
|
||||||
cluster.default_rpc_endpoint, rule.chain_id, target_name=cid, target_type="container", rule=rule.as_string()
|
|
||||||
)
|
|
||||||
|
|
||||||
with reporter.step("Wait for one block"):
|
|
||||||
self.wait_for_blocks()
|
|
||||||
|
|
||||||
return cid
|
|
||||||
|
|
||||||
@pytest.fixture(scope="class")
|
|
||||||
def bearer_token(self, frostfs_cli: FrostfsCli, user_container: str, temp_directory: str, cluster: Cluster) -> str:
|
|
||||||
with reporter.step(f"Create bearer token for {ape.Role.OTHERS} with all operations allowed"):
|
with reporter.step(f"Create bearer token for {ape.Role.OTHERS} with all operations allowed"):
|
||||||
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
|
||||||
rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL, role_condition)
|
rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL, role_condition)
|
||||||
bearer = create_bearer_token(frostfs_cli, temp_directory, user_container, rule, cluster.default_rpc_endpoint)
|
bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, cluster.default_rpc_endpoint)
|
||||||
|
|
||||||
return bearer_token_base64_from_file(bearer)
|
return bearer_token_base64_from_file(bearer)
|
||||||
|
|
||||||
@allure.title(f"[NEGATIVE] Put object without bearer token for {ape.Role.OTHERS}")
|
@allure.title(f"[NEGATIVE] Put object without bearer token for {ape.Role.OTHERS}")
|
||||||
def test_unable_put_without_bearer_token(self, simple_object_size: ObjectSize, user_container: str):
|
def test_unable_put_without_bearer_token(self, simple_object_size: ObjectSize, container: str):
|
||||||
upload_via_http_gate_curl(
|
upload_via_http_gate_curl(
|
||||||
cid=user_container,
|
cid=container,
|
||||||
filepath=generate_file(simple_object_size.value),
|
filepath=generate_file(simple_object_size.value),
|
||||||
endpoint=self.cluster.default_http_gate_endpoint,
|
endpoint=self.cluster.default_http_gate_endpoint,
|
||||||
error_pattern="access to object operation denied",
|
error_pattern="access to object operation denied",
|
||||||
)
|
)
|
||||||
|
|
||||||
@allure.title("Put object via HTTP using bearer token (object_size={object_size})")
|
@allure.title("Put object via HTTP using bearer token (object_size={object_size})")
|
||||||
def test_put_with_bearer_when_eacl_restrict(
|
@requires_container(
|
||||||
|
ContainerRequest(PLACEMENT_RULE, [APE_EVERYONE_ALLOW_ALL, CUSTOM_APE_RULE], short_name="custom with denied owner put")
|
||||||
|
)
|
||||||
|
def test_put_with_bearer_when_ape_restrict(
|
||||||
self,
|
self,
|
||||||
object_size: ObjectSize,
|
object_size: ObjectSize,
|
||||||
default_wallet: WalletInfo,
|
default_wallet: WalletInfo,
|
||||||
user_container: str,
|
container: str,
|
||||||
bearer_token: str,
|
bearer_token: str,
|
||||||
):
|
):
|
||||||
file_path = generate_file(object_size.value)
|
file_path = generate_file(object_size.value)
|
||||||
with reporter.step(f"Put object with bearer token for {ape.Role.OTHERS}, then get and verify hashes"):
|
with reporter.step(f"Put object with bearer token for {ape.Role.OTHERS}, then get and verify hashes"):
|
||||||
headers = [f" -H 'Authorization: Bearer {bearer_token}'"]
|
headers = [f" -H 'Authorization: Bearer {bearer_token}'"]
|
||||||
oid = upload_via_http_gate_curl(
|
oid = upload_via_http_gate_curl(
|
||||||
cid=user_container,
|
cid=container,
|
||||||
filepath=file_path,
|
filepath=file_path,
|
||||||
endpoint=self.cluster.default_http_gate_endpoint,
|
endpoint=self.cluster.default_http_gate_endpoint,
|
||||||
headers=headers,
|
headers=headers,
|
||||||
|
@ -81,7 +68,7 @@ class Test_http_bearer(ClusterTestBase):
|
||||||
oid=oid,
|
oid=oid,
|
||||||
file_name=file_path,
|
file_name=file_path,
|
||||||
wallet=default_wallet,
|
wallet=default_wallet,
|
||||||
cid=user_container,
|
cid=container,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
nodes=self.cluster.storage_nodes,
|
nodes=self.cluster.storage_nodes,
|
||||||
request_node=self.cluster.cluster_nodes[0],
|
request_node=self.cluster.cluster_nodes[0],
|
||||||
|
|
|
@ -1,8 +1,6 @@
|
||||||
import allure
|
import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
|
|
||||||
from frostfs_testlib.steps.cli.container import create_container
|
|
||||||
from frostfs_testlib.steps.cli.object import put_object_to_random_node
|
from frostfs_testlib.steps.cli.object import put_object_to_random_node
|
||||||
from frostfs_testlib.steps.epoch import get_epoch
|
from frostfs_testlib.steps.epoch import get_epoch
|
||||||
from frostfs_testlib.steps.http.http_gate import (
|
from frostfs_testlib.steps.http.http_gate import (
|
||||||
|
@ -17,10 +15,12 @@ from frostfs_testlib.steps.http.http_gate import (
|
||||||
verify_object_hash,
|
verify_object_hash,
|
||||||
)
|
)
|
||||||
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
|
from frostfs_testlib.utils.file_utils import TestFile, generate_file, get_file_hash
|
||||||
|
|
||||||
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
|
from ....helpers.container_request import REP_1_1_1_PUBLIC, REP_2_2_2_PUBLIC, requires_container
|
||||||
|
from ....helpers.utility import wait_for_gc_pass_on_storage_nodes
|
||||||
|
|
||||||
OBJECT_NOT_FOUND_ERROR = "not found"
|
OBJECT_NOT_FOUND_ERROR = "not found"
|
||||||
|
|
||||||
|
@ -31,68 +31,40 @@ OBJECT_NOT_FOUND_ERROR = "not found"
|
||||||
)
|
)
|
||||||
@allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#uploading", name="uploading")
|
@allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#uploading", name="uploading")
|
||||||
@allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading", name="downloading")
|
@allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading", name="downloading")
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.sanity
|
@pytest.mark.sanity
|
||||||
@pytest.mark.http_gate
|
@pytest.mark.http_gate
|
||||||
class TestHttpGate(ClusterTestBase):
|
class TestHttpGate(ClusterTestBase):
|
||||||
PLACEMENT_RULE_1 = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
|
@allure.title("Put over gRPC, Get over HTTP (object_size={object_size})")
|
||||||
PLACEMENT_RULE_2 = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
|
@requires_container(REP_1_1_1_PUBLIC)
|
||||||
|
def test_put_grpc_get_http(self, default_wallet: WalletInfo, container: str, test_file: TestFile):
|
||||||
@pytest.fixture(scope="class", autouse=True)
|
|
||||||
@allure.title("[Class/Autouse]: Prepare wallet and deposit")
|
|
||||||
def prepare_wallet(self, default_wallet):
|
|
||||||
TestHttpGate.wallet = default_wallet
|
|
||||||
|
|
||||||
@allure.title("Put over gRPC, Get over HTTP")
|
|
||||||
def test_put_grpc_get_http(self, complex_object_size: ObjectSize, simple_object_size: ObjectSize):
|
|
||||||
"""
|
"""
|
||||||
Test that object can be put using gRPC interface and get using HTTP.
|
Test that object can be put using gRPC interface and get using HTTP.
|
||||||
|
|
||||||
Steps:
|
Steps:
|
||||||
1. Create simple and large objects.
|
1. Create object.
|
||||||
2. Put objects using gRPC (frostfs-cli).
|
2. Put object using gRPC (frostfs-cli).
|
||||||
3. Download objects using HTTP gate (https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading).
|
3. Download object using HTTP gate (https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading).
|
||||||
4. Get objects using gRPC (frostfs-cli).
|
4. Get object using gRPC (frostfs-cli).
|
||||||
5. Compare hashes for got objects.
|
5. Compare hashes for got object.
|
||||||
6. Compare hashes for got and original objects.
|
6. Compare hashes for got and original objects.
|
||||||
|
|
||||||
Expected result:
|
Expected result:
|
||||||
Hashes must be the same.
|
Hashes must be the same.
|
||||||
"""
|
"""
|
||||||
cid = create_container(
|
|
||||||
self.wallet,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
rule=self.PLACEMENT_RULE_1,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
|
||||||
file_path_simple = generate_file(simple_object_size.value)
|
|
||||||
file_path_large = generate_file(complex_object_size.value)
|
|
||||||
|
|
||||||
with reporter.step("Put objects using gRPC"):
|
with reporter.step("Put object using gRPC"):
|
||||||
oid_simple = put_object_to_random_node(
|
object_id = put_object_to_random_node(default_wallet, test_file.path, container, self.shell, self.cluster)
|
||||||
wallet=self.wallet,
|
|
||||||
path=file_path_simple,
|
|
||||||
cid=cid,
|
|
||||||
shell=self.shell,
|
|
||||||
cluster=self.cluster,
|
|
||||||
)
|
|
||||||
oid_large = put_object_to_random_node(
|
|
||||||
wallet=self.wallet,
|
|
||||||
path=file_path_large,
|
|
||||||
cid=cid,
|
|
||||||
shell=self.shell,
|
|
||||||
cluster=self.cluster,
|
|
||||||
)
|
|
||||||
|
|
||||||
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
|
with reporter.step("Get object and check hash"):
|
||||||
verify_object_hash(
|
verify_object_hash(
|
||||||
oid=oid,
|
object_id,
|
||||||
file_name=file_path,
|
test_file.path,
|
||||||
wallet=self.wallet,
|
default_wallet,
|
||||||
cid=cid,
|
container,
|
||||||
shell=self.shell,
|
self.shell,
|
||||||
nodes=self.cluster.storage_nodes,
|
self.cluster.storage_nodes,
|
||||||
request_node=self.cluster.cluster_nodes[0],
|
self.cluster.cluster_nodes[0],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@ -107,9 +79,10 @@ class TestHttpGate(ClusterTestBase):
|
||||||
class TestHttpPut(ClusterTestBase):
|
class TestHttpPut(ClusterTestBase):
|
||||||
@allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#uploading", name="uploading")
|
@allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#uploading", name="uploading")
|
||||||
@allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading", name="downloading")
|
@allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading", name="downloading")
|
||||||
@allure.title("Put over HTTP, Get over HTTP")
|
@allure.title("Put over HTTP, Get over HTTP (object_size={object_size})")
|
||||||
@pytest.mark.smoke
|
@pytest.mark.smoke
|
||||||
def test_put_http_get_http(self, complex_object_size: ObjectSize, simple_object_size: ObjectSize):
|
@requires_container(REP_2_2_2_PUBLIC)
|
||||||
|
def test_put_http_get_http(self, container: str, default_wallet: WalletInfo, test_file: TestFile):
|
||||||
"""
|
"""
|
||||||
Test that object can be put and get using HTTP interface.
|
Test that object can be put and get using HTTP interface.
|
||||||
|
|
||||||
|
@ -122,33 +95,19 @@ class TestHttpPut(ClusterTestBase):
|
||||||
Expected result:
|
Expected result:
|
||||||
Hashes must be the same.
|
Hashes must be the same.
|
||||||
"""
|
"""
|
||||||
cid = create_container(
|
|
||||||
self.wallet,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
rule=self.PLACEMENT_RULE_2,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
|
||||||
file_path_simple = generate_file(simple_object_size.value)
|
|
||||||
file_path_large = generate_file(complex_object_size.value)
|
|
||||||
|
|
||||||
with reporter.step("Put objects using HTTP"):
|
with reporter.step("Put object using HTTP"):
|
||||||
oid_simple = upload_via_http_gate(
|
object_id = upload_via_http_gate(container, test_file.path, self.cluster.default_http_gate_endpoint)
|
||||||
cid=cid, path=file_path_simple, endpoint=self.cluster.default_http_gate_endpoint
|
|
||||||
)
|
|
||||||
oid_large = upload_via_http_gate(
|
|
||||||
cid=cid, path=file_path_large, endpoint=self.cluster.default_http_gate_endpoint
|
|
||||||
)
|
|
||||||
|
|
||||||
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
|
with reporter.step("Get object and check hash"):
|
||||||
verify_object_hash(
|
verify_object_hash(
|
||||||
oid=oid,
|
object_id,
|
||||||
file_name=file_path,
|
test_file.path,
|
||||||
wallet=self.wallet,
|
default_wallet,
|
||||||
cid=cid,
|
container,
|
||||||
shell=self.shell,
|
self.shell,
|
||||||
nodes=self.cluster.storage_nodes,
|
self.cluster.storage_nodes,
|
||||||
request_node=self.cluster.cluster_nodes[0],
|
self.cluster.cluster_nodes[0],
|
||||||
)
|
)
|
||||||
|
|
||||||
@allure.link(
|
@allure.link(
|
||||||
|
@ -165,7 +124,8 @@ class TestHttpPut(ClusterTestBase):
|
||||||
],
|
],
|
||||||
ids=["simple", "hyphen", "percent"],
|
ids=["simple", "hyphen", "percent"],
|
||||||
)
|
)
|
||||||
def test_put_http_get_http_with_headers(self, attributes: dict, simple_object_size: ObjectSize, id: str):
|
@requires_container(REP_2_2_2_PUBLIC)
|
||||||
|
def test_put_http_get_http_with_headers(self, container: str, attributes: dict, simple_object_size: ObjectSize, id: str):
|
||||||
"""
|
"""
|
||||||
Test that object can be downloaded using different attributes in HTTP header.
|
Test that object can be downloaded using different attributes in HTTP header.
|
||||||
|
|
||||||
|
@ -178,46 +138,27 @@ class TestHttpPut(ClusterTestBase):
|
||||||
Expected result:
|
Expected result:
|
||||||
Hashes must be the same.
|
Hashes must be the same.
|
||||||
"""
|
"""
|
||||||
cid = create_container(
|
|
||||||
self.wallet,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
rule=self.PLACEMENT_RULE_2,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
|
||||||
file_path = generate_file(simple_object_size.value)
|
file_path = generate_file(simple_object_size.value)
|
||||||
|
|
||||||
with reporter.step("Put objects using HTTP with attribute"):
|
with reporter.step("Put objects using HTTP with attribute"):
|
||||||
headers = attr_into_header(attributes)
|
headers = attr_into_header(attributes)
|
||||||
oid = upload_via_http_gate(
|
oid = upload_via_http_gate(container, file_path, self.cluster.default_http_gate_endpoint, headers)
|
||||||
cid=cid,
|
|
||||||
path=file_path,
|
|
||||||
headers=headers,
|
|
||||||
endpoint=self.cluster.default_http_gate_endpoint,
|
|
||||||
)
|
|
||||||
|
|
||||||
get_object_by_attr_and_verify_hashes(
|
get_object_by_attr_and_verify_hashes(
|
||||||
oid=oid,
|
oid,
|
||||||
file_name=file_path,
|
file_path,
|
||||||
cid=cid,
|
container,
|
||||||
attrs=attributes,
|
attributes,
|
||||||
node=self.cluster.cluster_nodes[0],
|
self.cluster.cluster_nodes[0],
|
||||||
)
|
)
|
||||||
|
|
||||||
@allure.title("Expiration-Epoch in HTTP header (epoch_gap={epoch_gap})")
|
@allure.title("Expiration-Epoch in HTTP header (epoch_gap={epoch_gap})")
|
||||||
@pytest.mark.parametrize("epoch_gap", [0, 1])
|
@pytest.mark.parametrize("epoch_gap", [0, 1])
|
||||||
def test_expiration_epoch_in_http(self, simple_object_size: ObjectSize, epoch_gap: int):
|
@requires_container(REP_2_2_2_PUBLIC)
|
||||||
endpoint = self.cluster.default_rpc_endpoint
|
def test_expiration_epoch_in_http(self, container: str, simple_object_size: ObjectSize, epoch_gap: int):
|
||||||
http_endpoint = self.cluster.default_http_gate_endpoint
|
http_endpoint = self.cluster.default_http_gate_endpoint
|
||||||
min_valid_epoch = get_epoch(self.shell, self.cluster) + epoch_gap
|
min_valid_epoch = get_epoch(self.shell, self.cluster) + epoch_gap
|
||||||
|
|
||||||
cid = create_container(
|
|
||||||
self.wallet,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=endpoint,
|
|
||||||
rule=self.PLACEMENT_RULE_2,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
|
||||||
file_path = generate_file(simple_object_size.value)
|
file_path = generate_file(simple_object_size.value)
|
||||||
oids_to_be_expired = []
|
oids_to_be_expired = []
|
||||||
oids_to_be_valid = []
|
oids_to_be_valid = []
|
||||||
|
@ -228,7 +169,7 @@ class TestHttpPut(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("Put objects using HTTP with attribute Expiration-Epoch"):
|
with reporter.step("Put objects using HTTP with attribute Expiration-Epoch"):
|
||||||
oid = upload_via_http_gate(
|
oid = upload_via_http_gate(
|
||||||
cid=cid,
|
cid=container,
|
||||||
path=file_path,
|
path=file_path,
|
||||||
headers=headers,
|
headers=headers,
|
||||||
endpoint=http_endpoint,
|
endpoint=http_endpoint,
|
||||||
|
@ -238,7 +179,7 @@ class TestHttpPut(ClusterTestBase):
|
||||||
else:
|
else:
|
||||||
oids_to_be_expired.append(oid)
|
oids_to_be_expired.append(oid)
|
||||||
with reporter.step("This object can be got"):
|
with reporter.step("This object can be got"):
|
||||||
get_via_http_gate(cid=cid, oid=oid, node=self.cluster.cluster_nodes[0])
|
get_via_http_gate(container, oid, self.cluster.cluster_nodes[0])
|
||||||
|
|
||||||
self.tick_epoch()
|
self.tick_epoch()
|
||||||
|
|
||||||
|
@ -248,24 +189,18 @@ class TestHttpPut(ClusterTestBase):
|
||||||
for oid in oids_to_be_expired:
|
for oid in oids_to_be_expired:
|
||||||
with reporter.step(f"{oid} shall be expired and cannot be got"):
|
with reporter.step(f"{oid} shall be expired and cannot be got"):
|
||||||
try_to_get_object_and_expect_error(
|
try_to_get_object_and_expect_error(
|
||||||
cid=cid,
|
cid=container,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
node=self.cluster.cluster_nodes[0],
|
node=self.cluster.cluster_nodes[0],
|
||||||
error_pattern=OBJECT_NOT_FOUND_ERROR,
|
error_pattern=OBJECT_NOT_FOUND_ERROR,
|
||||||
)
|
)
|
||||||
for oid in oids_to_be_valid:
|
for oid in oids_to_be_valid:
|
||||||
with reporter.step(f"{oid} shall be valid and can be got"):
|
with reporter.step(f"{oid} shall be valid and can be got"):
|
||||||
get_via_http_gate(cid=cid, oid=oid, node=self.cluster.cluster_nodes[0])
|
get_via_http_gate(cid=container, oid=oid, node=self.cluster.cluster_nodes[0])
|
||||||
|
|
||||||
@allure.title("Zip in HTTP header")
|
@allure.title("Zip in HTTP header")
|
||||||
def test_zip_in_http(self, complex_object_size: ObjectSize, simple_object_size: ObjectSize):
|
@requires_container(REP_2_2_2_PUBLIC)
|
||||||
cid = create_container(
|
def test_zip_in_http(self, container: str, complex_object_size: ObjectSize, simple_object_size: ObjectSize):
|
||||||
self.wallet,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
rule=self.PLACEMENT_RULE_2,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
|
||||||
file_path_simple = generate_file(simple_object_size.value)
|
file_path_simple = generate_file(simple_object_size.value)
|
||||||
file_path_large = generate_file(complex_object_size.value)
|
file_path_large = generate_file(complex_object_size.value)
|
||||||
common_prefix = "my_files"
|
common_prefix = "my_files"
|
||||||
|
@ -274,45 +209,33 @@ class TestHttpPut(ClusterTestBase):
|
||||||
headers2 = {"X-Attribute-FilePath": f"{common_prefix}/file2"}
|
headers2 = {"X-Attribute-FilePath": f"{common_prefix}/file2"}
|
||||||
|
|
||||||
upload_via_http_gate(
|
upload_via_http_gate(
|
||||||
cid=cid,
|
cid=container,
|
||||||
path=file_path_simple,
|
path=file_path_simple,
|
||||||
headers=headers1,
|
headers=headers1,
|
||||||
endpoint=self.cluster.default_http_gate_endpoint,
|
endpoint=self.cluster.default_http_gate_endpoint,
|
||||||
)
|
)
|
||||||
upload_via_http_gate(
|
upload_via_http_gate(container, file_path_large, headers2, self.cluster.default_http_gate_endpoint)
|
||||||
cid=cid,
|
upload_via_http_gate(container, file_path_large, headers2, self.cluster.default_http_gate_endpoint)
|
||||||
path=file_path_large,
|
|
||||||
headers=headers2,
|
|
||||||
endpoint=self.cluster.default_http_gate_endpoint,
|
|
||||||
)
|
|
||||||
|
|
||||||
dir_path = get_via_zip_http_gate(cid=cid, prefix=common_prefix, node=self.cluster.cluster_nodes[0])
|
dir_path = get_via_zip_http_gate(cid=container, prefix=common_prefix, node=self.cluster.cluster_nodes[0])
|
||||||
|
|
||||||
with reporter.step("Verify hashes"):
|
with reporter.step("Verify hashes"):
|
||||||
assert get_file_hash(f"{dir_path}/file1") == get_file_hash(file_path_simple)
|
assert get_file_hash(f"{dir_path}/file1") == get_file_hash(file_path_simple)
|
||||||
assert get_file_hash(f"{dir_path}/file2") == get_file_hash(file_path_large)
|
assert get_file_hash(f"{dir_path}/file2") == get_file_hash(file_path_large)
|
||||||
|
|
||||||
@pytest.mark.long
|
|
||||||
@allure.title("Put over HTTP/Curl, Get over HTTP/Curl for large object")
|
@allure.title("Put over HTTP/Curl, Get over HTTP/Curl for large object")
|
||||||
def test_put_http_get_http_large_file(self, complex_object_size: ObjectSize):
|
@requires_container(REP_2_2_2_PUBLIC)
|
||||||
|
def test_put_http_get_http_large_file(self, default_wallet: WalletInfo, container: str, complex_object_size: ObjectSize):
|
||||||
"""
|
"""
|
||||||
This test checks upload and download using curl with 'large' object.
|
This test checks upload and download using curl with 'large' object.
|
||||||
Large is object with size up to 20Mb.
|
Large is object with size up to 20Mb.
|
||||||
"""
|
"""
|
||||||
cid = create_container(
|
|
||||||
self.wallet,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
rule=self.PLACEMENT_RULE_2,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
|
||||||
|
|
||||||
file_path = generate_file(complex_object_size.value)
|
file_path = generate_file(complex_object_size.value)
|
||||||
|
|
||||||
with reporter.step("Put objects using HTTP"):
|
with reporter.step("Put objects using HTTP"):
|
||||||
oid_gate = upload_via_http_gate(cid=cid, path=file_path, endpoint=self.cluster.default_http_gate_endpoint)
|
oid_gate = upload_via_http_gate(cid=container, path=file_path, endpoint=self.cluster.default_http_gate_endpoint)
|
||||||
oid_curl = upload_via_http_gate_curl(
|
oid_curl = upload_via_http_gate_curl(
|
||||||
cid=cid,
|
cid=container,
|
||||||
filepath=file_path,
|
filepath=file_path,
|
||||||
endpoint=self.cluster.default_http_gate_endpoint,
|
endpoint=self.cluster.default_http_gate_endpoint,
|
||||||
)
|
)
|
||||||
|
@ -320,8 +243,8 @@ class TestHttpPut(ClusterTestBase):
|
||||||
verify_object_hash(
|
verify_object_hash(
|
||||||
oid=oid_gate,
|
oid=oid_gate,
|
||||||
file_name=file_path,
|
file_name=file_path,
|
||||||
wallet=self.wallet,
|
wallet=default_wallet,
|
||||||
cid=cid,
|
cid=container,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
nodes=self.cluster.storage_nodes,
|
nodes=self.cluster.storage_nodes,
|
||||||
request_node=self.cluster.cluster_nodes[0],
|
request_node=self.cluster.cluster_nodes[0],
|
||||||
|
@ -329,47 +252,32 @@ class TestHttpPut(ClusterTestBase):
|
||||||
verify_object_hash(
|
verify_object_hash(
|
||||||
oid=oid_curl,
|
oid=oid_curl,
|
||||||
file_name=file_path,
|
file_name=file_path,
|
||||||
wallet=self.wallet,
|
wallet=default_wallet,
|
||||||
cid=cid,
|
cid=container,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
nodes=self.cluster.storage_nodes,
|
nodes=self.cluster.storage_nodes,
|
||||||
request_node=self.cluster.cluster_nodes[0],
|
request_node=self.cluster.cluster_nodes[0],
|
||||||
object_getter=get_via_http_curl,
|
object_getter=get_via_http_curl,
|
||||||
)
|
)
|
||||||
|
|
||||||
@allure.title("Put/Get over HTTP using Curl utility")
|
@allure.title("Put/Get over HTTP using Curl utility (object_size={object_size})")
|
||||||
def test_put_http_get_http_curl(self, complex_object_size: ObjectSize, simple_object_size: ObjectSize):
|
@requires_container(REP_2_2_2_PUBLIC)
|
||||||
|
def test_put_http_get_http_curl(self, default_wallet: WalletInfo, container: str, test_file: TestFile):
|
||||||
"""
|
"""
|
||||||
Test checks upload and download over HTTP using curl utility.
|
Test checks upload and download over HTTP using curl utility.
|
||||||
"""
|
"""
|
||||||
cid = create_container(
|
|
||||||
self.wallet,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
rule=self.PLACEMENT_RULE_2,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
|
||||||
file_path_simple = generate_file(simple_object_size.value)
|
|
||||||
file_path_large = generate_file(complex_object_size.value)
|
|
||||||
|
|
||||||
with reporter.step("Put objects using curl utility"):
|
with reporter.step("Put object using curl utility"):
|
||||||
oid_simple = upload_via_http_gate_curl(
|
object_id = upload_via_http_gate_curl(container, test_file.path, self.cluster.default_http_gate_endpoint)
|
||||||
cid=cid, filepath=file_path_simple, endpoint=self.cluster.default_http_gate_endpoint
|
|
||||||
)
|
|
||||||
oid_large = upload_via_http_gate_curl(
|
|
||||||
cid=cid,
|
|
||||||
filepath=file_path_large,
|
|
||||||
endpoint=self.cluster.default_http_gate_endpoint,
|
|
||||||
)
|
|
||||||
|
|
||||||
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
|
with reporter.step("Get object and check hash"):
|
||||||
verify_object_hash(
|
verify_object_hash(
|
||||||
oid=oid,
|
object_id,
|
||||||
file_name=file_path,
|
test_file.path,
|
||||||
wallet=self.wallet,
|
default_wallet,
|
||||||
cid=cid,
|
container,
|
||||||
shell=self.shell,
|
self.shell,
|
||||||
nodes=self.cluster.storage_nodes,
|
self.cluster.storage_nodes,
|
||||||
request_node=self.cluster.cluster_nodes[0],
|
self.cluster.cluster_nodes[0],
|
||||||
object_getter=get_via_http_curl,
|
get_via_http_curl,
|
||||||
)
|
)
|
||||||
|
|
|
@ -4,13 +4,7 @@ import os
|
||||||
import allure
|
import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
|
from frostfs_testlib.steps.cli.container import delete_container, list_containers, wait_for_container_deletion
|
||||||
from frostfs_testlib.steps.cli.container import (
|
|
||||||
create_container,
|
|
||||||
delete_container,
|
|
||||||
list_containers,
|
|
||||||
wait_for_container_deletion,
|
|
||||||
)
|
|
||||||
from frostfs_testlib.steps.cli.object import delete_object
|
from frostfs_testlib.steps.cli.object import delete_object
|
||||||
from frostfs_testlib.steps.http.http_gate import (
|
from frostfs_testlib.steps.http.http_gate import (
|
||||||
attr_into_str_header_curl,
|
attr_into_str_header_curl,
|
||||||
|
@ -21,9 +15,12 @@ from frostfs_testlib.steps.http.http_gate import (
|
||||||
)
|
)
|
||||||
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
|
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
|
||||||
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.utils.file_utils import generate_file
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
|
from ....helpers.container_request import REP_2_1_4_PUBLIC, requires_container
|
||||||
|
|
||||||
OBJECT_ALREADY_REMOVED_ERROR = "object already removed"
|
OBJECT_ALREADY_REMOVED_ERROR = "object already removed"
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
|
@ -31,7 +28,6 @@ logger = logging.getLogger("NeoLogger")
|
||||||
@pytest.mark.http_gate
|
@pytest.mark.http_gate
|
||||||
@pytest.mark.http_put
|
@pytest.mark.http_put
|
||||||
class Test_http_headers(ClusterTestBase):
|
class Test_http_headers(ClusterTestBase):
|
||||||
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
|
|
||||||
obj1_keys = ["Writer", "Chapter1", "Chapter2"]
|
obj1_keys = ["Writer", "Chapter1", "Chapter2"]
|
||||||
obj2_keys = ["Writer", "Ch@pter1", "chapter2"]
|
obj2_keys = ["Writer", "Ch@pter1", "chapter2"]
|
||||||
values = ["Leo Tolstoy", "peace", "w@r"]
|
values = ["Leo Tolstoy", "peace", "w@r"]
|
||||||
|
@ -40,34 +36,23 @@ class Test_http_headers(ClusterTestBase):
|
||||||
{obj2_keys[0]: values[0], obj2_keys[1]: values[1], obj2_keys[2]: values[2]},
|
{obj2_keys[0]: values[0], obj2_keys[1]: values[1], obj2_keys[2]: values[2]},
|
||||||
]
|
]
|
||||||
|
|
||||||
@pytest.fixture(scope="class", autouse=True)
|
@pytest.fixture
|
||||||
@allure.title("[Class/Autouse]: Prepare wallet and deposit")
|
def storage_objects_with_attributes(self, container: str, wallet: WalletInfo, object_size: ObjectSize) -> list[StorageObjectInfo]:
|
||||||
def prepare_wallet(self, default_wallet):
|
|
||||||
Test_http_headers.wallet = default_wallet
|
|
||||||
|
|
||||||
def storage_objects_with_attributes(self, object_size: ObjectSize) -> list[StorageObjectInfo]:
|
|
||||||
# TODO: Deal with http tests
|
# TODO: Deal with http tests
|
||||||
if object_size.value > 1000:
|
if object_size.value > 1000:
|
||||||
pytest.skip("Complex objects for HTTP temporarly disabled for v0.37")
|
pytest.skip("Complex objects for HTTP temporarly disabled for v0.37")
|
||||||
|
|
||||||
storage_objects = []
|
storage_objects = []
|
||||||
wallet = self.wallet
|
|
||||||
cid = create_container(
|
|
||||||
wallet=self.wallet,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
rule=self.PLACEMENT_RULE,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
|
||||||
file_path = generate_file(object_size.value)
|
file_path = generate_file(object_size.value)
|
||||||
for attributes in self.OBJECT_ATTRIBUTES:
|
for attributes in self.OBJECT_ATTRIBUTES:
|
||||||
storage_object_id = upload_via_http_gate_curl(
|
storage_object_id = upload_via_http_gate_curl(
|
||||||
cid=cid,
|
cid=container,
|
||||||
filepath=file_path,
|
filepath=file_path,
|
||||||
endpoint=self.cluster.default_http_gate_endpoint,
|
endpoint=self.cluster.default_http_gate_endpoint,
|
||||||
headers=attr_into_str_header_curl(attributes),
|
headers=attr_into_str_header_curl(attributes),
|
||||||
)
|
)
|
||||||
storage_object = StorageObjectInfo(cid, storage_object_id)
|
storage_object = StorageObjectInfo(container, storage_object_id)
|
||||||
storage_object.size = os.path.getsize(file_path)
|
storage_object.size = os.path.getsize(file_path)
|
||||||
storage_object.wallet = wallet
|
storage_object.wallet = wallet
|
||||||
storage_object.file_path = file_path
|
storage_object.file_path = file_path
|
||||||
|
@ -75,9 +60,10 @@ class Test_http_headers(ClusterTestBase):
|
||||||
|
|
||||||
storage_objects.append(storage_object)
|
storage_objects.append(storage_object)
|
||||||
|
|
||||||
yield storage_objects
|
return storage_objects
|
||||||
|
|
||||||
@allure.title("Get object1 by attribute")
|
@allure.title("Get object1 by attribute (object_size={object_size})")
|
||||||
|
@requires_container(REP_2_1_4_PUBLIC)
|
||||||
def test_object1_can_be_get_by_attr(self, storage_objects_with_attributes: list[StorageObjectInfo]):
|
def test_object1_can_be_get_by_attr(self, storage_objects_with_attributes: list[StorageObjectInfo]):
|
||||||
"""
|
"""
|
||||||
Test to get object#1 by attribute and comapre hashes
|
Test to get object#1 by attribute and comapre hashes
|
||||||
|
@ -99,8 +85,9 @@ class Test_http_headers(ClusterTestBase):
|
||||||
node=self.cluster.cluster_nodes[0],
|
node=self.cluster.cluster_nodes[0],
|
||||||
)
|
)
|
||||||
|
|
||||||
@allure.title("Get object2 with different attributes, then delete object2 and get object1")
|
@allure.title("Get object2 with different attributes, then delete object2 and get object1 (object_size={object_size})")
|
||||||
def test_object2_can_be_get_by_attr(self, storage_objects_with_attributes: list[StorageObjectInfo]):
|
@requires_container(REP_2_1_4_PUBLIC)
|
||||||
|
def test_object2_can_be_get_by_attr(self, default_wallet: WalletInfo, storage_objects_with_attributes: list[StorageObjectInfo]):
|
||||||
"""
|
"""
|
||||||
Test to get object2 with different attributes, then delete object2 and get object1 using 1st attribute. Note: obj1 and obj2 have the same attribute#1,
|
Test to get object2 with different attributes, then delete object2 and get object1 using 1st attribute. Note: obj1 and obj2 have the same attribute#1,
|
||||||
and when obj2 is deleted you can get obj1 by 1st attribute
|
and when obj2 is deleted you can get obj1 by 1st attribute
|
||||||
|
@ -131,7 +118,7 @@ class Test_http_headers(ClusterTestBase):
|
||||||
)
|
)
|
||||||
with reporter.step("Delete object#2 and verify is the container deleted"):
|
with reporter.step("Delete object#2 and verify is the container deleted"):
|
||||||
delete_object(
|
delete_object(
|
||||||
wallet=self.wallet,
|
wallet=default_wallet,
|
||||||
cid=storage_object_2.cid,
|
cid=storage_object_2.cid,
|
||||||
oid=storage_object_2.oid,
|
oid=storage_object_2.oid,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
|
@ -145,9 +132,7 @@ class Test_http_headers(ClusterTestBase):
|
||||||
)
|
)
|
||||||
storage_objects_with_attributes.remove(storage_object_2)
|
storage_objects_with_attributes.remove(storage_object_2)
|
||||||
|
|
||||||
with reporter.step(
|
with reporter.step(f'Download object#1 with attributes [Writer={storage_object_1.attributes["Writer"]}] and compare hashes'):
|
||||||
f'Download object#1 with attributes [Writer={storage_object_1.attributes["Writer"]}] and compare hashes'
|
|
||||||
):
|
|
||||||
key_value_pair = {"Writer": storage_object_1.attributes["Writer"]}
|
key_value_pair = {"Writer": storage_object_1.attributes["Writer"]}
|
||||||
get_object_by_attr_and_verify_hashes(
|
get_object_by_attr_and_verify_hashes(
|
||||||
oid=storage_object_1.oid,
|
oid=storage_object_1.oid,
|
||||||
|
@ -157,8 +142,9 @@ class Test_http_headers(ClusterTestBase):
|
||||||
node=self.cluster.cluster_nodes[0],
|
node=self.cluster.cluster_nodes[0],
|
||||||
)
|
)
|
||||||
|
|
||||||
@allure.title("[NEGATIVE] Put object and get right after container is deleted")
|
@allure.title("[NEGATIVE] Put object and get right after container is deleted (object_size={object_size})")
|
||||||
def test_negative_put_and_get_object3(self, storage_objects_with_attributes: list[StorageObjectInfo]):
|
@requires_container(REP_2_1_4_PUBLIC)
|
||||||
|
def test_negative_put_and_get_object3(self, default_wallet: WalletInfo, storage_objects_with_attributes: list[StorageObjectInfo]):
|
||||||
"""
|
"""
|
||||||
Test to attempt to put object and try to download it right after the container has been deleted
|
Test to attempt to put object and try to download it right after the container has been deleted
|
||||||
|
|
||||||
|
@ -188,7 +174,7 @@ class Test_http_headers(ClusterTestBase):
|
||||||
)
|
)
|
||||||
with reporter.step("Delete container and verify container deletion"):
|
with reporter.step("Delete container and verify container deletion"):
|
||||||
delete_container(
|
delete_container(
|
||||||
wallet=self.wallet,
|
wallet=default_wallet,
|
||||||
cid=storage_object_1.cid,
|
cid=storage_object_1.cid,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
endpoint=self.cluster.default_rpc_endpoint,
|
||||||
|
@ -196,14 +182,12 @@ class Test_http_headers(ClusterTestBase):
|
||||||
)
|
)
|
||||||
self.tick_epoch()
|
self.tick_epoch()
|
||||||
wait_for_container_deletion(
|
wait_for_container_deletion(
|
||||||
self.wallet,
|
default_wallet,
|
||||||
storage_object_1.cid,
|
storage_object_1.cid,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
endpoint=self.cluster.default_rpc_endpoint,
|
||||||
)
|
)
|
||||||
assert storage_object_1.cid not in list_containers(
|
assert storage_object_1.cid not in list_containers(default_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
|
||||||
self.wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
|
|
||||||
)
|
|
||||||
with reporter.step("[Negative] Try to download (wget) object via wget with attributes [peace=peace]"):
|
with reporter.step("[Negative] Try to download (wget) object via wget with attributes [peace=peace]"):
|
||||||
request = f"/get/{storage_object_1.cid}/peace/peace"
|
request = f"/get/{storage_object_1.cid}/peace/peace"
|
||||||
error_pattern = "404 Not Found"
|
error_pattern = "404 Not Found"
|
||||||
|
|
|
@ -3,9 +3,7 @@ import logging
|
||||||
import allure
|
import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
|
|
||||||
from frostfs_testlib.s3 import AwsCliClient, S3ClientWrapper
|
from frostfs_testlib.s3 import AwsCliClient, S3ClientWrapper
|
||||||
from frostfs_testlib.steps.cli.container import create_container
|
|
||||||
from frostfs_testlib.steps.cli.object import put_object_to_random_node
|
from frostfs_testlib.steps.cli.object import put_object_to_random_node
|
||||||
from frostfs_testlib.steps.http.http_gate import (
|
from frostfs_testlib.steps.http.http_gate import (
|
||||||
assert_hashes_are_equal,
|
assert_hashes_are_equal,
|
||||||
|
@ -15,25 +13,22 @@ from frostfs_testlib.steps.http.http_gate import (
|
||||||
verify_object_hash,
|
verify_object_hash,
|
||||||
)
|
)
|
||||||
from frostfs_testlib.steps.s3 import s3_helper
|
from frostfs_testlib.steps.s3 import s3_helper
|
||||||
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.utils.file_utils import generate_file
|
from frostfs_testlib.utils.file_utils import TestFile
|
||||||
|
|
||||||
|
from ....helpers.container_request import REP_2_1_4_PUBLIC, requires_container
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.sanity
|
@pytest.mark.sanity
|
||||||
@pytest.mark.http_gate
|
@pytest.mark.http_gate
|
||||||
class Test_http_object(ClusterTestBase):
|
class Test_http_object(ClusterTestBase):
|
||||||
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
|
|
||||||
|
|
||||||
@pytest.fixture(scope="class", autouse=True)
|
|
||||||
@allure.title("[Class/Autouse]: Prepare wallet and deposit")
|
|
||||||
def prepare_wallet(self, default_wallet):
|
|
||||||
Test_http_object.wallet = default_wallet
|
|
||||||
|
|
||||||
@allure.title("Put over gRPC, Get over HTTP with attributes (obj_size={object_size})")
|
@allure.title("Put over gRPC, Get over HTTP with attributes (obj_size={object_size})")
|
||||||
def test_object_put_get_attributes(self, object_size: ObjectSize):
|
@requires_container(REP_2_1_4_PUBLIC)
|
||||||
|
def test_object_put_get_attributes(self, default_wallet: WalletInfo, container: str, test_file: TestFile):
|
||||||
"""
|
"""
|
||||||
Test that object can be put using gRPC interface and got using HTTP.
|
Test that object can be put using gRPC interface and got using HTTP.
|
||||||
|
|
||||||
|
@ -52,18 +47,6 @@ class Test_http_object(ClusterTestBase):
|
||||||
Hashes must be the same.
|
Hashes must be the same.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
with reporter.step("Create public container"):
|
|
||||||
cid = create_container(
|
|
||||||
self.wallet,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
rule=self.PLACEMENT_RULE,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Generate file
|
|
||||||
file_path = generate_file(object_size.value)
|
|
||||||
|
|
||||||
# List of Key=Value attributes
|
# List of Key=Value attributes
|
||||||
obj_key1 = "chapter1"
|
obj_key1 = "chapter1"
|
||||||
obj_value1 = "peace"
|
obj_value1 = "peace"
|
||||||
|
@ -76,9 +59,9 @@ class Test_http_object(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("Put objects using gRPC [--attributes chapter1=peace,chapter2=war]"):
|
with reporter.step("Put objects using gRPC [--attributes chapter1=peace,chapter2=war]"):
|
||||||
oid = put_object_to_random_node(
|
oid = put_object_to_random_node(
|
||||||
wallet=self.wallet,
|
wallet=default_wallet,
|
||||||
path=file_path,
|
path=test_file.path,
|
||||||
cid=cid,
|
cid=container,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
cluster=self.cluster,
|
cluster=self.cluster,
|
||||||
attributes=f"{key_value1},{key_value2}",
|
attributes=f"{key_value1},{key_value2}",
|
||||||
|
@ -86,9 +69,9 @@ class Test_http_object(ClusterTestBase):
|
||||||
with reporter.step("Get object and verify hashes [ get/$CID/$OID ]"):
|
with reporter.step("Get object and verify hashes [ get/$CID/$OID ]"):
|
||||||
verify_object_hash(
|
verify_object_hash(
|
||||||
oid=oid,
|
oid=oid,
|
||||||
file_name=file_path,
|
file_name=test_file.path,
|
||||||
wallet=self.wallet,
|
wallet=default_wallet,
|
||||||
cid=cid,
|
cid=container,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
nodes=self.cluster.storage_nodes,
|
nodes=self.cluster.storage_nodes,
|
||||||
request_node=self.cluster.cluster_nodes[0],
|
request_node=self.cluster.cluster_nodes[0],
|
||||||
|
@ -96,10 +79,10 @@ class Test_http_object(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("[Negative] try to get object: [get/$CID/chapter1/peace]"):
|
with reporter.step("[Negative] try to get object: [get/$CID/chapter1/peace]"):
|
||||||
attrs = {obj_key1: obj_value1, obj_key2: obj_value2}
|
attrs = {obj_key1: obj_value1, obj_key2: obj_value2}
|
||||||
request = f"/get/{cid}/{obj_key1}/{obj_value1}"
|
request = f"/get/{container}/{obj_key1}/{obj_value1}"
|
||||||
expected_err_msg = "Failed to get object via HTTP gate:"
|
expected_err_msg = "Failed to get object via HTTP gate:"
|
||||||
try_to_get_object_via_passed_request_and_expect_error(
|
try_to_get_object_via_passed_request_and_expect_error(
|
||||||
cid=cid,
|
cid=container,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
node=self.cluster.cluster_nodes[0],
|
node=self.cluster.cluster_nodes[0],
|
||||||
error_pattern=expected_err_msg,
|
error_pattern=expected_err_msg,
|
||||||
|
@ -110,15 +93,15 @@ class Test_http_object(ClusterTestBase):
|
||||||
with reporter.step("Download the object with attribute [get_by_attribute/$CID/chapter1/peace]"):
|
with reporter.step("Download the object with attribute [get_by_attribute/$CID/chapter1/peace]"):
|
||||||
get_object_by_attr_and_verify_hashes(
|
get_object_by_attr_and_verify_hashes(
|
||||||
oid=oid,
|
oid=oid,
|
||||||
file_name=file_path,
|
file_name=test_file.path,
|
||||||
cid=cid,
|
cid=container,
|
||||||
attrs=attrs,
|
attrs=attrs,
|
||||||
node=self.cluster.cluster_nodes[0],
|
node=self.cluster.cluster_nodes[0],
|
||||||
)
|
)
|
||||||
with reporter.step("[Negative] try to get object: get_by_attribute/$CID/$OID"):
|
with reporter.step("[Negative] try to get object: get_by_attribute/$CID/$OID"):
|
||||||
request = f"/get_by_attribute/{cid}/{oid}"
|
request = f"/get_by_attribute/{container}/{oid}"
|
||||||
try_to_get_object_via_passed_request_and_expect_error(
|
try_to_get_object_via_passed_request_and_expect_error(
|
||||||
cid=cid,
|
cid=container,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
node=self.cluster.cluster_nodes[0],
|
node=self.cluster.cluster_nodes[0],
|
||||||
error_pattern=expected_err_msg,
|
error_pattern=expected_err_msg,
|
||||||
|
@ -127,7 +110,7 @@ class Test_http_object(ClusterTestBase):
|
||||||
|
|
||||||
@allure.title("Put over s3, Get over HTTP with bucket name and key (object_size={object_size})")
|
@allure.title("Put over s3, Get over HTTP with bucket name and key (object_size={object_size})")
|
||||||
@pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True)
|
@pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True)
|
||||||
def test_object_put_get_bucketname_key(self, object_size: ObjectSize, s3_client: S3ClientWrapper):
|
def test_object_put_get_bucketname_key(self, test_file: TestFile, s3_client: S3ClientWrapper):
|
||||||
"""
|
"""
|
||||||
Test that object can be put using s3-gateway interface and got via HTTP with bucket name and object key.
|
Test that object can be put using s3-gateway interface and got via HTTP with bucket name and object key.
|
||||||
|
|
||||||
|
@ -142,10 +125,9 @@ class Test_http_object(ClusterTestBase):
|
||||||
Hashes must be the same.
|
Hashes must be the same.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
file_path = generate_file(object_size.value)
|
object_key = s3_helper.object_key_from_file_path(test_file.path)
|
||||||
object_key = s3_helper.object_key_from_file_path(file_path)
|
|
||||||
bucket = s3_client.create_bucket(acl="public-read-write")
|
bucket = s3_client.create_bucket(acl="public-read-write")
|
||||||
s3_client.put_object(bucket=bucket, filepath=file_path, key=object_key)
|
s3_client.put_object(bucket=bucket, filepath=test_file.path, key=object_key)
|
||||||
obj_s3 = s3_client.get_object(bucket=bucket, key=object_key)
|
obj_s3 = s3_client.get_object(bucket=bucket, key=object_key)
|
||||||
|
|
||||||
request = f"/get/{bucket}/{object_key}"
|
request = f"/get/{bucket}/{object_key}"
|
||||||
|
@ -156,4 +138,4 @@ class Test_http_object(ClusterTestBase):
|
||||||
request_path=request,
|
request_path=request,
|
||||||
)
|
)
|
||||||
with reporter.step("Verify hashes"):
|
with reporter.step("Verify hashes"):
|
||||||
assert_hashes_are_equal(file_path, obj_http, obj_s3)
|
assert_hashes_are_equal(test_file.path, obj_http, obj_s3)
|
||||||
|
|
|
@ -3,28 +3,23 @@ import logging
|
||||||
import allure
|
import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
|
|
||||||
from frostfs_testlib.steps.cli.container import create_container
|
|
||||||
from frostfs_testlib.steps.http.http_gate import upload_via_http_gate_curl, verify_object_hash
|
from frostfs_testlib.steps.http.http_gate import upload_via_http_gate_curl, verify_object_hash
|
||||||
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.utils.file_utils import generate_file
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
|
from ....helpers.container_request import REP_2_1_4_PUBLIC, requires_container
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.http_gate
|
@pytest.mark.http_gate
|
||||||
@pytest.mark.http_put
|
@pytest.mark.http_put
|
||||||
class Test_http_streaming(ClusterTestBase):
|
class Test_http_streaming(ClusterTestBase):
|
||||||
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
|
|
||||||
|
|
||||||
@pytest.fixture(scope="class", autouse=True)
|
|
||||||
@allure.title("[Class/Autouse]: Prepare wallet and deposit")
|
|
||||||
def prepare_wallet(self, default_wallet):
|
|
||||||
Test_http_streaming.wallet = default_wallet
|
|
||||||
|
|
||||||
@allure.title("Put via pipe (streaming), Get over HTTP and verify hashes")
|
@allure.title("Put via pipe (streaming), Get over HTTP and verify hashes")
|
||||||
def test_object_can_be_put_get_by_streaming(self, complex_object_size: ObjectSize):
|
@requires_container(REP_2_1_4_PUBLIC)
|
||||||
|
def test_object_can_be_put_get_by_streaming(self, default_wallet: WalletInfo, container: str, complex_object_size: ObjectSize):
|
||||||
"""
|
"""
|
||||||
Test that object can be put using gRPC interface and get using HTTP.
|
Test that object can be put using gRPC interface and get using HTTP.
|
||||||
|
|
||||||
|
@ -37,27 +32,20 @@ class Test_http_streaming(ClusterTestBase):
|
||||||
Expected result:
|
Expected result:
|
||||||
Hashes must be the same.
|
Hashes must be the same.
|
||||||
"""
|
"""
|
||||||
with reporter.step("Create public container and verify container creation"):
|
|
||||||
cid = create_container(
|
|
||||||
self.wallet,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
rule=self.PLACEMENT_RULE,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
|
||||||
with reporter.step("Allocate big object"):
|
with reporter.step("Allocate big object"):
|
||||||
# Generate file
|
# Generate file
|
||||||
file_path = generate_file(complex_object_size.value)
|
file_path = generate_file(complex_object_size.value)
|
||||||
|
|
||||||
with reporter.step("Put objects using curl utility and Get object and verify hashes [ get/$CID/$OID ]"):
|
with reporter.step("Put objects using curl utility"):
|
||||||
oid = upload_via_http_gate_curl(
|
oid = upload_via_http_gate_curl(container, file_path, self.cluster.default_http_gate_endpoint)
|
||||||
cid=cid, filepath=file_path, endpoint=self.cluster.default_http_gate_endpoint
|
|
||||||
)
|
with reporter.step("Get object and verify hashes [ get/$CID/$OID ]"):
|
||||||
verify_object_hash(
|
verify_object_hash(
|
||||||
oid=oid,
|
oid=oid,
|
||||||
file_name=file_path,
|
file_name=file_path,
|
||||||
wallet=self.wallet,
|
wallet=default_wallet,
|
||||||
cid=cid,
|
cid=container,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
nodes=self.cluster.storage_nodes,
|
nodes=self.cluster.storage_nodes,
|
||||||
request_node=self.cluster.cluster_nodes[0],
|
request_node=self.cluster.cluster_nodes[0],
|
||||||
|
|
|
@ -7,8 +7,6 @@ import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
|
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
|
||||||
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
|
|
||||||
from frostfs_testlib.steps.cli.container import create_container
|
|
||||||
from frostfs_testlib.steps.cli.object import get_netmap_netinfo, get_object_from_random_node, head_object
|
from frostfs_testlib.steps.cli.object import get_netmap_netinfo, get_object_from_random_node, head_object
|
||||||
from frostfs_testlib.steps.epoch import get_epoch, wait_for_epochs_align
|
from frostfs_testlib.steps.epoch import get_epoch, wait_for_epochs_align
|
||||||
from frostfs_testlib.steps.http.http_gate import (
|
from frostfs_testlib.steps.http.http_gate import (
|
||||||
|
@ -18,12 +16,18 @@ from frostfs_testlib.steps.http.http_gate import (
|
||||||
verify_object_hash,
|
verify_object_hash,
|
||||||
)
|
)
|
||||||
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.utils.file_utils import generate_file
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
|
from ....helpers.container_request import REP_2_1_2_PUBLIC, requires_container
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
EXPIRATION_TIMESTAMP_HEADER = "__SYSTEM__EXPIRATION_TIMESTAMP"
|
EXPIRATION_TIMESTAMP_HEADER = "__SYSTEM__EXPIRATION_TIMESTAMP"
|
||||||
|
|
||||||
|
# TODO: Depreacated. Use EXPIRATION_EPOCH_ATTRIBUTE from testlib
|
||||||
EXPIRATION_EPOCH_HEADER = "__SYSTEM__EXPIRATION_EPOCH"
|
EXPIRATION_EPOCH_HEADER = "__SYSTEM__EXPIRATION_EPOCH"
|
||||||
|
|
||||||
EXPIRATION_DURATION_HEADER = "__SYSTEM__EXPIRATION_DURATION"
|
EXPIRATION_DURATION_HEADER = "__SYSTEM__EXPIRATION_DURATION"
|
||||||
EXPIRATION_EXPIRATION_RFC = "__SYSTEM__EXPIRATION_RFC3339"
|
EXPIRATION_EXPIRATION_RFC = "__SYSTEM__EXPIRATION_RFC3339"
|
||||||
SYSTEM_EXPIRATION_EPOCH = "System-Expiration-Epoch"
|
SYSTEM_EXPIRATION_EPOCH = "System-Expiration-Epoch"
|
||||||
|
@ -35,29 +39,11 @@ SYSTEM_EXPIRATION_RFC3339 = "System-Expiration-RFC3339"
|
||||||
@pytest.mark.http_gate
|
@pytest.mark.http_gate
|
||||||
@pytest.mark.http_put
|
@pytest.mark.http_put
|
||||||
class Test_http_system_header(ClusterTestBase):
|
class Test_http_system_header(ClusterTestBase):
|
||||||
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
|
|
||||||
|
|
||||||
@pytest.fixture(scope="class", autouse=True)
|
|
||||||
@allure.title("[Class/Autouse]: Prepare wallet and deposit")
|
|
||||||
def prepare_wallet(self, default_wallet):
|
|
||||||
Test_http_system_header.wallet = default_wallet
|
|
||||||
|
|
||||||
@pytest.fixture(scope="class")
|
|
||||||
@allure.title("Create container")
|
|
||||||
def user_container(self):
|
|
||||||
return create_container(
|
|
||||||
wallet=self.wallet,
|
|
||||||
shell=self.shell,
|
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
|
||||||
rule=self.PLACEMENT_RULE,
|
|
||||||
basic_acl=PUBLIC_ACL,
|
|
||||||
)
|
|
||||||
|
|
||||||
@pytest.fixture(scope="class")
|
@pytest.fixture(scope="class")
|
||||||
@allure.title("epoch_duration in seconds")
|
@allure.title("epoch_duration in seconds")
|
||||||
def epoch_duration(self) -> int:
|
def epoch_duration(self, default_wallet: WalletInfo) -> int:
|
||||||
net_info = get_netmap_netinfo(
|
net_info = get_netmap_netinfo(
|
||||||
wallet=self.wallet,
|
wallet=default_wallet,
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
endpoint=self.cluster.default_rpc_endpoint,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
)
|
)
|
||||||
|
@ -80,7 +66,7 @@ class Test_http_system_header(ClusterTestBase):
|
||||||
else:
|
else:
|
||||||
return str(calendar.timegm(future_datetime.timetuple()))
|
return str(calendar.timegm(future_datetime.timetuple()))
|
||||||
|
|
||||||
@allure.title("Check is (header_output) Key=Value exists and equal in passed (header_to_find)")
|
@allure.title("Check if (header_output) Key=Value exists and equal in passed (header_to_find)")
|
||||||
def check_key_value_presented_header(self, header_output: dict, header_to_find: dict) -> bool:
|
def check_key_value_presented_header(self, header_output: dict, header_to_find: dict) -> bool:
|
||||||
header_att = header_output["header"]["attributes"]
|
header_att = header_output["header"]["attributes"]
|
||||||
for key_to_check, val_to_check in header_to_find.items():
|
for key_to_check, val_to_check in header_to_find.items():
|
||||||
|
@ -109,25 +95,25 @@ class Test_http_system_header(ClusterTestBase):
|
||||||
), f"Only {EXPIRATION_EXPIRATION_RFC} can be displayed in header attributes"
|
), f"Only {EXPIRATION_EXPIRATION_RFC} can be displayed in header attributes"
|
||||||
|
|
||||||
@allure.title("Put / get / verify object and return head command result to invoker")
|
@allure.title("Put / get / verify object and return head command result to invoker")
|
||||||
def oid_header_info_for_object(self, file_path: str, attributes: dict, user_container: str):
|
def oid_header_info_for_object(self, default_wallet: WalletInfo, container: str, test_file: str, attributes: dict):
|
||||||
oid = upload_via_http_gate_curl(
|
oid = upload_via_http_gate_curl(
|
||||||
cid=user_container,
|
cid=container,
|
||||||
filepath=file_path,
|
filepath=test_file,
|
||||||
endpoint=self.cluster.default_http_gate_endpoint,
|
endpoint=self.cluster.default_http_gate_endpoint,
|
||||||
headers=attr_into_str_header_curl(attributes),
|
headers=attr_into_str_header_curl(attributes),
|
||||||
)
|
)
|
||||||
verify_object_hash(
|
verify_object_hash(
|
||||||
oid=oid,
|
oid=oid,
|
||||||
file_name=file_path,
|
file_name=test_file,
|
||||||
wallet=self.wallet,
|
wallet=default_wallet,
|
||||||
cid=user_container,
|
cid=container,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
nodes=self.cluster.storage_nodes,
|
nodes=self.cluster.storage_nodes,
|
||||||
request_node=self.cluster.cluster_nodes[0],
|
request_node=self.cluster.cluster_nodes[0],
|
||||||
)
|
)
|
||||||
head = head_object(
|
head = head_object(
|
||||||
wallet=self.wallet,
|
wallet=default_wallet,
|
||||||
cid=user_container,
|
cid=container,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
endpoint=self.cluster.default_rpc_endpoint,
|
endpoint=self.cluster.default_rpc_endpoint,
|
||||||
|
@ -135,12 +121,13 @@ class Test_http_system_header(ClusterTestBase):
|
||||||
return oid, head
|
return oid, head
|
||||||
|
|
||||||
@allure.title("[NEGATIVE] Put object with expired epoch")
|
@allure.title("[NEGATIVE] Put object with expired epoch")
|
||||||
def test_unable_put_expired_epoch(self, user_container: str, simple_object_size: ObjectSize):
|
@requires_container(REP_2_1_2_PUBLIC)
|
||||||
|
def test_unable_put_expired_epoch(self, container: str, simple_object_size: ObjectSize):
|
||||||
headers = attr_into_str_header_curl({"System-Expiration-Epoch": str(get_epoch(self.shell, self.cluster) - 1)})
|
headers = attr_into_str_header_curl({"System-Expiration-Epoch": str(get_epoch(self.shell, self.cluster) - 1)})
|
||||||
file_path = generate_file(simple_object_size.value)
|
file_path = generate_file(simple_object_size.value)
|
||||||
with reporter.step("Put object using HTTP with attribute Expiration-Epoch where epoch is expired"):
|
with reporter.step("Put object using HTTP with attribute Expiration-Epoch where epoch is expired"):
|
||||||
upload_via_http_gate_curl(
|
upload_via_http_gate_curl(
|
||||||
cid=user_container,
|
cid=container,
|
||||||
filepath=file_path,
|
filepath=file_path,
|
||||||
endpoint=self.cluster.default_http_gate_endpoint,
|
endpoint=self.cluster.default_http_gate_endpoint,
|
||||||
headers=headers,
|
headers=headers,
|
||||||
|
@ -148,14 +135,13 @@ class Test_http_system_header(ClusterTestBase):
|
||||||
)
|
)
|
||||||
|
|
||||||
@allure.title("[NEGATIVE] Put object with negative System-Expiration-Duration")
|
@allure.title("[NEGATIVE] Put object with negative System-Expiration-Duration")
|
||||||
def test_unable_put_negative_duration(self, user_container: str, simple_object_size: ObjectSize):
|
@requires_container(REP_2_1_2_PUBLIC)
|
||||||
|
def test_unable_put_negative_duration(self, container: str, simple_object_size: ObjectSize):
|
||||||
headers = attr_into_str_header_curl({"System-Expiration-Duration": "-1h"})
|
headers = attr_into_str_header_curl({"System-Expiration-Duration": "-1h"})
|
||||||
file_path = generate_file(simple_object_size.value)
|
file_path = generate_file(simple_object_size.value)
|
||||||
with reporter.step(
|
with reporter.step("Put object using HTTP with attribute System-Expiration-Duration where duration is negative"):
|
||||||
"Put object using HTTP with attribute System-Expiration-Duration where duration is negative"
|
|
||||||
):
|
|
||||||
upload_via_http_gate_curl(
|
upload_via_http_gate_curl(
|
||||||
cid=user_container,
|
cid=container,
|
||||||
filepath=file_path,
|
filepath=file_path,
|
||||||
endpoint=self.cluster.default_http_gate_endpoint,
|
endpoint=self.cluster.default_http_gate_endpoint,
|
||||||
headers=headers,
|
headers=headers,
|
||||||
|
@ -163,28 +149,26 @@ class Test_http_system_header(ClusterTestBase):
|
||||||
)
|
)
|
||||||
|
|
||||||
@allure.title("[NEGATIVE] Put object with System-Expiration-Timestamp value in the past")
|
@allure.title("[NEGATIVE] Put object with System-Expiration-Timestamp value in the past")
|
||||||
def test_unable_put_expired_timestamp(self, user_container: str, simple_object_size: ObjectSize):
|
@requires_container(REP_2_1_2_PUBLIC)
|
||||||
|
def test_unable_put_expired_timestamp(self, container: str, simple_object_size: ObjectSize):
|
||||||
headers = attr_into_str_header_curl({"System-Expiration-Timestamp": "1635075727"})
|
headers = attr_into_str_header_curl({"System-Expiration-Timestamp": "1635075727"})
|
||||||
file_path = generate_file(simple_object_size.value)
|
file_path = generate_file(simple_object_size.value)
|
||||||
with reporter.step(
|
with reporter.step("Put object using HTTP with attribute System-Expiration-Timestamp where duration is in the past"):
|
||||||
"Put object using HTTP with attribute System-Expiration-Timestamp where duration is in the past"
|
|
||||||
):
|
|
||||||
upload_via_http_gate_curl(
|
upload_via_http_gate_curl(
|
||||||
cid=user_container,
|
cid=container,
|
||||||
filepath=file_path,
|
filepath=file_path,
|
||||||
endpoint=self.cluster.default_http_gate_endpoint,
|
endpoint=self.cluster.default_http_gate_endpoint,
|
||||||
headers=headers,
|
headers=headers,
|
||||||
error_pattern=f"{EXPIRATION_TIMESTAMP_HEADER} must be in the future",
|
error_pattern=f"{EXPIRATION_TIMESTAMP_HEADER} must be in the future",
|
||||||
)
|
)
|
||||||
|
|
||||||
@allure.title(
|
@allure.title("[NEGATIVE] Put object using HTTP with attribute System-Expiration-RFC3339 where duration is in the past")
|
||||||
"[NEGATIVE] Put object using HTTP with attribute System-Expiration-RFC3339 where duration is in the past"
|
@requires_container(REP_2_1_2_PUBLIC)
|
||||||
)
|
def test_unable_put_expired_rfc(self, container: str, simple_object_size: ObjectSize):
|
||||||
def test_unable_put_expired_rfc(self, user_container: str, simple_object_size: ObjectSize):
|
|
||||||
headers = attr_into_str_header_curl({"System-Expiration-RFC3339": "2021-11-22T09:55:49Z"})
|
headers = attr_into_str_header_curl({"System-Expiration-RFC3339": "2021-11-22T09:55:49Z"})
|
||||||
file_path = generate_file(simple_object_size.value)
|
file_path = generate_file(simple_object_size.value)
|
||||||
upload_via_http_gate_curl(
|
upload_via_http_gate_curl(
|
||||||
cid=user_container,
|
cid=container,
|
||||||
filepath=file_path,
|
filepath=file_path,
|
||||||
endpoint=self.cluster.default_http_gate_endpoint,
|
endpoint=self.cluster.default_http_gate_endpoint,
|
||||||
headers=headers,
|
headers=headers,
|
||||||
|
@ -192,7 +176,10 @@ class Test_http_system_header(ClusterTestBase):
|
||||||
)
|
)
|
||||||
|
|
||||||
@allure.title("Priority of attributes epoch>duration (obj_size={object_size})")
|
@allure.title("Priority of attributes epoch>duration (obj_size={object_size})")
|
||||||
def test_http_attr_priority_epoch_duration(self, user_container: str, object_size: ObjectSize, epoch_duration: int):
|
@requires_container(REP_2_1_2_PUBLIC)
|
||||||
|
def test_http_attr_priority_epoch_duration(
|
||||||
|
self, default_wallet: WalletInfo, container: str, object_size: ObjectSize, epoch_duration: int
|
||||||
|
):
|
||||||
self.tick_epoch()
|
self.tick_epoch()
|
||||||
epoch_count = 1
|
epoch_count = 1
|
||||||
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
|
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
|
||||||
|
@ -204,9 +191,7 @@ class Test_http_system_header(ClusterTestBase):
|
||||||
with reporter.step(
|
with reporter.step(
|
||||||
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
|
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
|
||||||
):
|
):
|
||||||
oid, head_info = self.oid_header_info_for_object(
|
oid, head_info = self.oid_header_info_for_object(default_wallet, file_path, attributes, container)
|
||||||
file_path=file_path, attributes=attributes, user_container=user_container
|
|
||||||
)
|
|
||||||
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
|
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
|
||||||
with reporter.step("Check that object becomes unavailable when epoch is expired"):
|
with reporter.step("Check that object becomes unavailable when epoch is expired"):
|
||||||
for _ in range(0, epoch_count + 1):
|
for _ in range(0, epoch_count + 1):
|
||||||
|
@ -218,17 +203,20 @@ class Test_http_system_header(ClusterTestBase):
|
||||||
with reporter.step("Check object deleted because it expires-on epoch"):
|
with reporter.step("Check object deleted because it expires-on epoch"):
|
||||||
wait_for_epochs_align(self.shell, self.cluster)
|
wait_for_epochs_align(self.shell, self.cluster)
|
||||||
try_to_get_object_and_expect_error(
|
try_to_get_object_and_expect_error(
|
||||||
cid=user_container,
|
cid=container,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
node=self.cluster.cluster_nodes[0],
|
node=self.cluster.cluster_nodes[0],
|
||||||
error_pattern="404 Not Found",
|
error_pattern="404 Not Found",
|
||||||
)
|
)
|
||||||
# check that object is not available via grpc
|
# check that object is not available via grpc
|
||||||
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
||||||
get_object_from_random_node(self.wallet, user_container, oid, self.shell, self.cluster)
|
get_object_from_random_node(default_wallet, container, oid, self.shell, self.cluster)
|
||||||
|
|
||||||
@allure.title("Priority of attributes duration>timestamp (obj_size={object_size})")
|
@allure.title("Priority of attributes duration>timestamp (obj_size={object_size})")
|
||||||
def test_http_attr_priority_dur_timestamp(self, user_container: str, object_size: ObjectSize, epoch_duration: int):
|
@requires_container(REP_2_1_2_PUBLIC)
|
||||||
|
def test_http_attr_priority_dur_timestamp(
|
||||||
|
self, default_wallet: WalletInfo, container: str, object_size: ObjectSize, epoch_duration: int
|
||||||
|
):
|
||||||
self.tick_epoch()
|
self.tick_epoch()
|
||||||
epoch_count = 2
|
epoch_count = 2
|
||||||
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
|
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
|
||||||
|
@ -243,9 +231,7 @@ class Test_http_system_header(ClusterTestBase):
|
||||||
with reporter.step(
|
with reporter.step(
|
||||||
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
|
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
|
||||||
):
|
):
|
||||||
oid, head_info = self.oid_header_info_for_object(
|
oid, head_info = self.oid_header_info_for_object(default_wallet, file_path, attributes, container)
|
||||||
file_path=file_path, attributes=attributes, user_container=user_container
|
|
||||||
)
|
|
||||||
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
|
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
|
||||||
with reporter.step("Check that object becomes unavailable when epoch is expired"):
|
with reporter.step("Check that object becomes unavailable when epoch is expired"):
|
||||||
for _ in range(0, epoch_count + 1):
|
for _ in range(0, epoch_count + 1):
|
||||||
|
@ -257,65 +243,19 @@ class Test_http_system_header(ClusterTestBase):
|
||||||
with reporter.step("Check object deleted because it expires-on epoch"):
|
with reporter.step("Check object deleted because it expires-on epoch"):
|
||||||
wait_for_epochs_align(self.shell, self.cluster)
|
wait_for_epochs_align(self.shell, self.cluster)
|
||||||
try_to_get_object_and_expect_error(
|
try_to_get_object_and_expect_error(
|
||||||
cid=user_container,
|
cid=container,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
node=self.cluster.cluster_nodes[0],
|
node=self.cluster.cluster_nodes[0],
|
||||||
error_pattern="404 Not Found",
|
error_pattern="404 Not Found",
|
||||||
)
|
)
|
||||||
# check that object is not available via grpc
|
# check that object is not available via grpc
|
||||||
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
||||||
get_object_from_random_node(self.wallet, user_container, oid, self.shell, self.cluster)
|
get_object_from_random_node(default_wallet, container, oid, self.shell, self.cluster)
|
||||||
|
|
||||||
@allure.title("Priority of attributes timestamp>Expiration-RFC (obj_size={object_size})")
|
@allure.title("Priority of attributes timestamp>Expiration-RFC (obj_size={object_size})")
|
||||||
def test_http_attr_priority_timestamp_rfc(self, user_container: str, object_size: ObjectSize, epoch_duration: int):
|
@requires_container(REP_2_1_2_PUBLIC)
|
||||||
self.tick_epoch()
|
def test_http_attr_priority_timestamp_rfc(
|
||||||
epoch_count = 2
|
self, default_wallet: WalletInfo, container: str, object_size: ObjectSize, epoch_duration: int
|
||||||
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
|
|
||||||
logger.info(
|
|
||||||
f"epoch duration={epoch_duration}, current_epoch= {get_epoch(self.shell, self.cluster)} expected_epoch {expected_epoch}"
|
|
||||||
)
|
|
||||||
attributes = {
|
|
||||||
SYSTEM_EXPIRATION_TIMESTAMP: self.epoch_count_into_timestamp(epoch_duration=epoch_duration, epoch=2),
|
|
||||||
SYSTEM_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(
|
|
||||||
epoch_duration=epoch_duration, epoch=1, rfc3339=True
|
|
||||||
),
|
|
||||||
}
|
|
||||||
file_path = generate_file(object_size.value)
|
|
||||||
with reporter.step(
|
|
||||||
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
|
|
||||||
):
|
|
||||||
oid, head_info = self.oid_header_info_for_object(
|
|
||||||
file_path=file_path, attributes=attributes, user_container=user_container
|
|
||||||
)
|
|
||||||
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
|
|
||||||
with reporter.step("Check that object becomes unavailable when epoch is expired"):
|
|
||||||
for _ in range(0, epoch_count + 1):
|
|
||||||
self.tick_epoch()
|
|
||||||
assert (
|
|
||||||
get_epoch(self.shell, self.cluster) == expected_epoch + 1
|
|
||||||
), f"Epochs should be equal: {get_epoch(self.shell, self.cluster)} != {expected_epoch + 1}"
|
|
||||||
|
|
||||||
with reporter.step("Check object deleted because it expires-on epoch"):
|
|
||||||
wait_for_epochs_align(self.shell, self.cluster)
|
|
||||||
try_to_get_object_and_expect_error(
|
|
||||||
cid=user_container,
|
|
||||||
oid=oid,
|
|
||||||
node=self.cluster.cluster_nodes[0],
|
|
||||||
error_pattern="404 Not Found",
|
|
||||||
)
|
|
||||||
# check that object is not available via grpc
|
|
||||||
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
|
||||||
get_object_from_random_node(self.wallet, user_container, oid, self.shell, self.cluster)
|
|
||||||
|
|
||||||
@allure.title("Object should be deleted when expiration passed (obj_size={object_size})")
|
|
||||||
@pytest.mark.parametrize(
|
|
||||||
"object_size",
|
|
||||||
# TODO: "complex" temporarly disabled for v0.37
|
|
||||||
["simple"],
|
|
||||||
indirect=True,
|
|
||||||
)
|
|
||||||
def test_http_rfc_object_unavailable_after_expir(
|
|
||||||
self, user_container: str, object_size: ObjectSize, epoch_duration: int
|
|
||||||
):
|
):
|
||||||
self.tick_epoch()
|
self.tick_epoch()
|
||||||
epoch_count = 2
|
epoch_count = 2
|
||||||
|
@ -324,19 +264,57 @@ class Test_http_system_header(ClusterTestBase):
|
||||||
f"epoch duration={epoch_duration}, current_epoch= {get_epoch(self.shell, self.cluster)} expected_epoch {expected_epoch}"
|
f"epoch duration={epoch_duration}, current_epoch= {get_epoch(self.shell, self.cluster)} expected_epoch {expected_epoch}"
|
||||||
)
|
)
|
||||||
attributes = {
|
attributes = {
|
||||||
SYSTEM_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(
|
SYSTEM_EXPIRATION_TIMESTAMP: self.epoch_count_into_timestamp(epoch_duration=epoch_duration, epoch=2),
|
||||||
epoch_duration=epoch_duration, epoch=2, rfc3339=True
|
SYSTEM_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(epoch_duration=epoch_duration, epoch=1, rfc3339=True),
|
||||||
)
|
|
||||||
}
|
}
|
||||||
file_path = generate_file(object_size.value)
|
file_path = generate_file(object_size.value)
|
||||||
with reporter.step(
|
with reporter.step(
|
||||||
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
|
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
|
||||||
):
|
):
|
||||||
oid, head_info = self.oid_header_info_for_object(
|
oid, head_info = self.oid_header_info_for_object(default_wallet, file_path, attributes, container)
|
||||||
file_path=file_path,
|
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
|
||||||
attributes=attributes,
|
with reporter.step("Check that object becomes unavailable when epoch is expired"):
|
||||||
user_container=user_container,
|
for _ in range(0, epoch_count + 1):
|
||||||
)
|
self.tick_epoch()
|
||||||
|
assert (
|
||||||
|
get_epoch(self.shell, self.cluster) == expected_epoch + 1
|
||||||
|
), f"Epochs should be equal: {get_epoch(self.shell, self.cluster)} != {expected_epoch + 1}"
|
||||||
|
|
||||||
|
with reporter.step("Check object deleted because it expires-on epoch"):
|
||||||
|
wait_for_epochs_align(self.shell, self.cluster)
|
||||||
|
try_to_get_object_and_expect_error(
|
||||||
|
cid=container,
|
||||||
|
oid=oid,
|
||||||
|
node=self.cluster.cluster_nodes[0],
|
||||||
|
error_pattern="404 Not Found",
|
||||||
|
)
|
||||||
|
# check that object is not available via grpc
|
||||||
|
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
||||||
|
get_object_from_random_node(default_wallet, container, oid, self.shell, self.cluster)
|
||||||
|
|
||||||
|
@allure.title("Object should be deleted when expiration passed (obj_size={object_size})")
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"object_size",
|
||||||
|
# TODO: "complex" temporarly disabled for v0.37
|
||||||
|
["simple"],
|
||||||
|
indirect=True,
|
||||||
|
)
|
||||||
|
@requires_container(REP_2_1_2_PUBLIC)
|
||||||
|
def test_http_rfc_object_unavailable_after_expir(
|
||||||
|
self, default_wallet: WalletInfo, container: str, object_size: ObjectSize, epoch_duration: int
|
||||||
|
):
|
||||||
|
self.tick_epoch()
|
||||||
|
epoch_count = 2
|
||||||
|
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
|
||||||
|
logger.info(
|
||||||
|
f"epoch duration={epoch_duration}, current_epoch= {get_epoch(self.shell, self.cluster)} expected_epoch {expected_epoch}"
|
||||||
|
)
|
||||||
|
attributes = {SYSTEM_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(epoch_duration=epoch_duration, epoch=2, rfc3339=True)}
|
||||||
|
file_path = generate_file(object_size.value)
|
||||||
|
with reporter.step(
|
||||||
|
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
|
||||||
|
):
|
||||||
|
oid, head_info = self.oid_header_info_for_object(default_wallet, file_path, attributes, container)
|
||||||
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
|
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
|
||||||
with reporter.step("Check that object becomes unavailable when epoch is expired"):
|
with reporter.step("Check that object becomes unavailable when epoch is expired"):
|
||||||
for _ in range(0, epoch_count + 1):
|
for _ in range(0, epoch_count + 1):
|
||||||
|
@ -349,11 +327,11 @@ class Test_http_system_header(ClusterTestBase):
|
||||||
with reporter.step("Check object deleted because it expires-on epoch"):
|
with reporter.step("Check object deleted because it expires-on epoch"):
|
||||||
wait_for_epochs_align(self.shell, self.cluster)
|
wait_for_epochs_align(self.shell, self.cluster)
|
||||||
try_to_get_object_and_expect_error(
|
try_to_get_object_and_expect_error(
|
||||||
cid=user_container,
|
cid=container,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
node=self.cluster.cluster_nodes[0],
|
node=self.cluster.cluster_nodes[0],
|
||||||
error_pattern="404 Not Found",
|
error_pattern="404 Not Found",
|
||||||
)
|
)
|
||||||
# check that object is not available via grpc
|
# check that object is not available via grpc
|
||||||
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
||||||
get_object_from_random_node(self.wallet, user_container, oid, self.shell, self.cluster)
|
get_object_from_random_node(default_wallet, container, oid, self.shell, self.cluster)
|
||||||
|
|
0
pytest_tests/testsuites/services/s3_gate/__init__.py
Normal file
0
pytest_tests/testsuites/services/s3_gate/__init__.py
Normal file
|
@ -9,6 +9,7 @@ from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
from frostfs_testlib.utils.file_utils import generate_file
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.acl
|
@pytest.mark.acl
|
||||||
@pytest.mark.s3_gate
|
@pytest.mark.s3_gate
|
||||||
class TestS3GateACL:
|
class TestS3GateACL:
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
import string
|
||||||
from datetime import datetime, timedelta
|
from datetime import datetime, timedelta
|
||||||
|
|
||||||
import allure
|
import allure
|
||||||
|
@ -6,9 +7,17 @@ from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
|
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
|
||||||
from frostfs_testlib.steps.s3 import s3_helper
|
from frostfs_testlib.steps.s3 import s3_helper
|
||||||
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
|
from frostfs_testlib.utils import string_utils
|
||||||
from frostfs_testlib.utils.file_utils import generate_file
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
|
VALID_SYMBOLS_WITHOUT_DOT = string.ascii_lowercase + string.digits + "-"
|
||||||
|
VALID_AND_INVALID_SYMBOLS = string.ascii_letters + string.punctuation
|
||||||
|
|
||||||
|
# TODO: The dot symbol is temporarily not supported.
|
||||||
|
VALID_SYMBOLS_WITH_DOT = VALID_SYMBOLS_WITHOUT_DOT + "."
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.s3_gate
|
@pytest.mark.s3_gate
|
||||||
@pytest.mark.s3_gate_bucket
|
@pytest.mark.s3_gate_bucket
|
||||||
class TestS3GateBucket:
|
class TestS3GateBucket:
|
||||||
|
@ -139,3 +148,87 @@ class TestS3GateBucket:
|
||||||
s3_client.delete_bucket(bucket)
|
s3_client.delete_bucket(bucket)
|
||||||
with pytest.raises(Exception, match=r".*Not Found.*"):
|
with pytest.raises(Exception, match=r".*Not Found.*"):
|
||||||
s3_client.head_bucket(bucket)
|
s3_client.head_bucket(bucket)
|
||||||
|
|
||||||
|
@allure.title("Create bucket with valid name length (s3_client={s3_client}, length={length})")
|
||||||
|
@pytest.mark.parametrize("length", [3, 4, 32, 62, 63])
|
||||||
|
def test_s3_create_bucket_with_valid_length(self, s3_client: S3ClientWrapper, length: int):
|
||||||
|
bucket_name = string_utils.random_string(length, VALID_SYMBOLS_WITHOUT_DOT)
|
||||||
|
while not (bucket_name[0].isalnum() and bucket_name[-1].isalnum()):
|
||||||
|
bucket_name = string_utils.random_string(length, VALID_SYMBOLS_WITHOUT_DOT)
|
||||||
|
|
||||||
|
with reporter.step("Create bucket with valid name length"):
|
||||||
|
s3_client.create_bucket(bucket_name)
|
||||||
|
|
||||||
|
with reporter.step("Check bucket name in buckets"):
|
||||||
|
assert bucket_name in s3_client.list_buckets()
|
||||||
|
|
||||||
|
@allure.title("[NEGATIVE] Bucket with invalid name length should not be created (s3_client={s3_client}, length={length})")
|
||||||
|
@pytest.mark.parametrize("length", [2, 64, 254, 255, 256])
|
||||||
|
def test_s3_create_bucket_with_invalid_length(self, s3_client: S3ClientWrapper, length: int):
|
||||||
|
bucket_name = string_utils.random_string(length, VALID_SYMBOLS_WITHOUT_DOT)
|
||||||
|
while not (bucket_name[0].isalnum() and bucket_name[-1].isalnum()):
|
||||||
|
bucket_name = string_utils.random_string(length, VALID_SYMBOLS_WITHOUT_DOT)
|
||||||
|
|
||||||
|
with reporter.step("Create bucket with invalid name length and catch exception"):
|
||||||
|
with pytest.raises(Exception, match=".*(?:InvalidBucketName|Invalid bucket name).*"):
|
||||||
|
s3_client.create_bucket(bucket_name)
|
||||||
|
|
||||||
|
@allure.title("[NEGATIVE] Bucket with invalid name should not be created (s3_client={s3_client}, bucket_name={bucket_name})")
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"bucket_name",
|
||||||
|
[
|
||||||
|
"BUCKET-1",
|
||||||
|
"buckeT-2",
|
||||||
|
# The following case for AWS CLI is not handled correctly
|
||||||
|
# "-bucket-3",
|
||||||
|
"bucket-4-",
|
||||||
|
".bucket-5",
|
||||||
|
"bucket-6.",
|
||||||
|
"bucket..7",
|
||||||
|
"bucket+8",
|
||||||
|
"bucket_9",
|
||||||
|
"bucket 10",
|
||||||
|
"127.10.5.11",
|
||||||
|
"xn--bucket-12",
|
||||||
|
"bucket-13-s3alias",
|
||||||
|
# The following names can be used in FrostFS but are prohibited by the AWS specification.
|
||||||
|
# "sthree-bucket-14"
|
||||||
|
# "sthree-configurator-bucket-15"
|
||||||
|
# "amzn-s3-demo-bucket-16"
|
||||||
|
# "sthree-bucket-17"
|
||||||
|
# "bucket-18--ol-s3"
|
||||||
|
# "bucket-19--x-s3"
|
||||||
|
# "bucket-20.mrap"
|
||||||
|
],
|
||||||
|
)
|
||||||
|
def test_s3_create_bucket_with_invalid_name(self, s3_client: S3ClientWrapper, bucket_name: str):
|
||||||
|
with reporter.step("Create bucket with invalid name and catch exception"):
|
||||||
|
with pytest.raises(Exception, match=".*(?:InvalidBucketName|Invalid bucket name).*"):
|
||||||
|
s3_client.create_bucket(bucket_name)
|
||||||
|
|
||||||
|
@allure.title("[NEGATIVE] Delete non-empty bucket (s3_client={s3_client})")
|
||||||
|
def test_s3_check_availability_non_empty_bucket_after_deleting(
|
||||||
|
self,
|
||||||
|
bucket: str,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
s3_client: S3ClientWrapper,
|
||||||
|
):
|
||||||
|
object_path = generate_file(simple_object_size.value)
|
||||||
|
object_name = s3_helper.object_key_from_file_path(object_path)
|
||||||
|
|
||||||
|
with reporter.step("Put object into bucket"):
|
||||||
|
s3_client.put_object(bucket, object_path)
|
||||||
|
|
||||||
|
with reporter.step("Check that object appears in bucket"):
|
||||||
|
objects = s3_client.list_objects(bucket)
|
||||||
|
assert objects, f"Expected bucket with object, got empty {objects}"
|
||||||
|
assert object_name in objects, f"Object {object_name} not found in bucket object list {objects}"
|
||||||
|
|
||||||
|
with reporter.step("Try to delete not empty bucket and get error"):
|
||||||
|
with pytest.raises(Exception, match=r".*The bucket you tried to delete is not empty.*"):
|
||||||
|
s3_client.delete_bucket(bucket)
|
||||||
|
|
||||||
|
with reporter.step("Check bucket availability"):
|
||||||
|
objects = s3_client.list_objects(bucket)
|
||||||
|
assert objects, f"Expected bucket with object, got empty {objects}"
|
||||||
|
assert object_name in objects, f"Object {object_name} not found in bucket object list {objects}"
|
||||||
|
|
|
@ -22,14 +22,13 @@ def bucket_no_lock(s3_client: S3ClientWrapper):
|
||||||
return s3_client.create_bucket(object_lock_enabled_for_bucket=False)
|
return s3_client.create_bucket(object_lock_enabled_for_bucket=False)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.s3_gate
|
@pytest.mark.s3_gate
|
||||||
@pytest.mark.s3_gate_locking
|
@pytest.mark.s3_gate_locking
|
||||||
@pytest.mark.parametrize("version_id", [None, "second"])
|
@pytest.mark.parametrize("version_id", [None, "second"])
|
||||||
class TestS3GateLocking:
|
class TestS3GateLocking:
|
||||||
@allure.title("Retention period and legal lock on object (version_id={version_id}, s3_client={s3_client})")
|
@allure.title("Retention period and legal lock on object (version_id={version_id}, s3_client={s3_client})")
|
||||||
def test_s3_object_locking(
|
def test_s3_object_locking(self, s3_client: S3ClientWrapper, bucket_w_lock: str, version_id: str, simple_object_size: ObjectSize):
|
||||||
self, s3_client: S3ClientWrapper, bucket_w_lock: str, version_id: str, simple_object_size: ObjectSize
|
|
||||||
):
|
|
||||||
file_path = generate_file(simple_object_size.value)
|
file_path = generate_file(simple_object_size.value)
|
||||||
file_name = s3_helper.object_key_from_file_path(file_path)
|
file_name = s3_helper.object_key_from_file_path(file_path)
|
||||||
retention_period = 2
|
retention_period = 2
|
||||||
|
@ -73,9 +72,7 @@ class TestS3GateLocking:
|
||||||
s3_client.delete_object(bucket_w_lock, file_name, version_id)
|
s3_client.delete_object(bucket_w_lock, file_name, version_id)
|
||||||
|
|
||||||
@allure.title("Impossible to change retention mode COMPLIANCE (version_id={version_id}, s3_client={s3_client})")
|
@allure.title("Impossible to change retention mode COMPLIANCE (version_id={version_id}, s3_client={s3_client})")
|
||||||
def test_s3_mode_compliance(
|
def test_s3_mode_compliance(self, s3_client: S3ClientWrapper, bucket_w_lock: str, version_id: str, simple_object_size: ObjectSize):
|
||||||
self, s3_client: S3ClientWrapper, bucket_w_lock: str, version_id: str, simple_object_size: ObjectSize
|
|
||||||
):
|
|
||||||
file_path = generate_file(simple_object_size.value)
|
file_path = generate_file(simple_object_size.value)
|
||||||
file_name = s3_helper.object_key_from_file_path(file_path)
|
file_name = s3_helper.object_key_from_file_path(file_path)
|
||||||
retention_period = 2
|
retention_period = 2
|
||||||
|
@ -105,9 +102,7 @@ class TestS3GateLocking:
|
||||||
s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id)
|
s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id)
|
||||||
|
|
||||||
@allure.title("Change retention mode GOVERNANCE (version_id={version_id}, s3_client={s3_client})")
|
@allure.title("Change retention mode GOVERNANCE (version_id={version_id}, s3_client={s3_client})")
|
||||||
def test_s3_mode_governance(
|
def test_s3_mode_governance(self, s3_client: S3ClientWrapper, bucket_w_lock: str, version_id: str, simple_object_size: ObjectSize):
|
||||||
self, s3_client: S3ClientWrapper, bucket_w_lock: str, version_id: str, simple_object_size: ObjectSize
|
|
||||||
):
|
|
||||||
file_path = generate_file(simple_object_size.value)
|
file_path = generate_file(simple_object_size.value)
|
||||||
file_name = s3_helper.object_key_from_file_path(file_path)
|
file_name = s3_helper.object_key_from_file_path(file_path)
|
||||||
retention_period = 3
|
retention_period = 3
|
||||||
|
@ -155,12 +150,8 @@ class TestS3GateLocking:
|
||||||
s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id, True)
|
s3_client.put_object_retention(bucket_w_lock, file_name, retention, version_id, True)
|
||||||
s3_helper.assert_object_lock_mode(s3_client, bucket_w_lock, file_name, "GOVERNANCE", date_obj, "OFF")
|
s3_helper.assert_object_lock_mode(s3_client, bucket_w_lock, file_name, "GOVERNANCE", date_obj, "OFF")
|
||||||
|
|
||||||
@allure.title(
|
@allure.title("[NEGATIVE] Lock object in bucket with disabled locking (version_id={version_id}, s3_client={s3_client})")
|
||||||
"[NEGATIVE] Lock object in bucket with disabled locking (version_id={version_id}, s3_client={s3_client})"
|
def test_s3_legal_hold(self, s3_client: S3ClientWrapper, bucket_no_lock: str, version_id: str, simple_object_size: ObjectSize):
|
||||||
)
|
|
||||||
def test_s3_legal_hold(
|
|
||||||
self, s3_client: S3ClientWrapper, bucket_no_lock: str, version_id: str, simple_object_size: ObjectSize
|
|
||||||
):
|
|
||||||
file_path = generate_file(simple_object_size.value)
|
file_path = generate_file(simple_object_size.value)
|
||||||
file_name = s3_helper.object_key_from_file_path(file_path)
|
file_name = s3_helper.object_key_from_file_path(file_path)
|
||||||
|
|
||||||
|
@ -174,6 +165,7 @@ class TestS3GateLocking:
|
||||||
s3_client.put_object_legal_hold(bucket_no_lock, file_name, "ON", version_id)
|
s3_client.put_object_legal_hold(bucket_no_lock, file_name, "ON", version_id)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.s3_gate
|
@pytest.mark.s3_gate
|
||||||
class TestS3GateLockingBucket:
|
class TestS3GateLockingBucket:
|
||||||
@allure.title("Bucket Lock (s3_client={s3_client})")
|
@allure.title("Bucket Lock (s3_client={s3_client})")
|
||||||
|
|
|
@ -2,7 +2,8 @@ import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
|
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
|
||||||
from frostfs_testlib.steps.cli.container import list_objects, search_container_by_name
|
from frostfs_testlib.s3.interfaces import BucketContainerResolver
|
||||||
|
from frostfs_testlib.steps.cli.container import list_objects
|
||||||
from frostfs_testlib.steps.s3 import s3_helper
|
from frostfs_testlib.steps.s3 import s3_helper
|
||||||
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
|
@ -13,6 +14,7 @@ from frostfs_testlib.utils.file_utils import generate_file, get_file_hash, split
|
||||||
PART_SIZE = 5 * 1024 * 1024
|
PART_SIZE = 5 * 1024 * 1024
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.s3_gate
|
@pytest.mark.s3_gate
|
||||||
@pytest.mark.s3_gate_multipart
|
@pytest.mark.s3_gate_multipart
|
||||||
class TestS3GateMultipart(ClusterTestBase):
|
class TestS3GateMultipart(ClusterTestBase):
|
||||||
|
@ -21,7 +23,12 @@ class TestS3GateMultipart(ClusterTestBase):
|
||||||
@allure.title("Object Multipart API (s3_client={s3_client}, bucket versioning = {versioning_status})")
|
@allure.title("Object Multipart API (s3_client={s3_client}, bucket versioning = {versioning_status})")
|
||||||
@pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED, VersioningStatus.UNDEFINED], indirect=True)
|
@pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED, VersioningStatus.UNDEFINED], indirect=True)
|
||||||
def test_s3_object_multipart(
|
def test_s3_object_multipart(
|
||||||
self, s3_client: S3ClientWrapper, bucket: str, default_wallet: WalletInfo, versioning_status: str
|
self,
|
||||||
|
s3_client: S3ClientWrapper,
|
||||||
|
bucket: str,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
versioning_status: str,
|
||||||
|
bucket_container_resolver: BucketContainerResolver,
|
||||||
):
|
):
|
||||||
parts_count = 5
|
parts_count = 5
|
||||||
file_name_large = generate_file(PART_SIZE * parts_count) # 5Mb - min part
|
file_name_large = generate_file(PART_SIZE * parts_count) # 5Mb - min part
|
||||||
|
@ -31,7 +38,7 @@ class TestS3GateMultipart(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step(f"Get related container_id for bucket"):
|
with reporter.step(f"Get related container_id for bucket"):
|
||||||
for cluster_node in self.cluster.cluster_nodes:
|
for cluster_node in self.cluster.cluster_nodes:
|
||||||
container_id = search_container_by_name(bucket, cluster_node)
|
container_id = bucket_container_resolver.resolve(cluster_node, bucket)
|
||||||
if container_id:
|
if container_id:
|
||||||
break
|
break
|
||||||
|
|
||||||
|
@ -86,6 +93,7 @@ class TestS3GateMultipart(ClusterTestBase):
|
||||||
bucket: str,
|
bucket: str,
|
||||||
simple_object_size: ObjectSize,
|
simple_object_size: ObjectSize,
|
||||||
complex_object_size: ObjectSize,
|
complex_object_size: ObjectSize,
|
||||||
|
bucket_container_resolver: BucketContainerResolver,
|
||||||
):
|
):
|
||||||
complex_file = generate_file(complex_object_size.value)
|
complex_file = generate_file(complex_object_size.value)
|
||||||
simple_file = generate_file(simple_object_size.value)
|
simple_file = generate_file(simple_object_size.value)
|
||||||
|
@ -95,7 +103,7 @@ class TestS3GateMultipart(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("Get related container_id for bucket"):
|
with reporter.step("Get related container_id for bucket"):
|
||||||
for cluster_node in self.cluster.cluster_nodes:
|
for cluster_node in self.cluster.cluster_nodes:
|
||||||
container_id = search_container_by_name(bucket, cluster_node)
|
container_id = bucket_container_resolver.resolve(cluster_node, bucket)
|
||||||
if container_id:
|
if container_id:
|
||||||
break
|
break
|
||||||
|
|
||||||
|
|
|
@ -16,15 +16,10 @@ from frostfs_testlib.steps.s3 import s3_helper
|
||||||
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
from frostfs_testlib.testing.test_control import expect_not_raises
|
from frostfs_testlib.testing.test_control import expect_not_raises
|
||||||
from frostfs_testlib.utils import wallet_utils
|
from frostfs_testlib.utils import wallet_utils
|
||||||
from frostfs_testlib.utils.file_utils import (
|
from frostfs_testlib.utils.file_utils import TestFile, concat_files, generate_file, generate_file_with_content, get_file_hash
|
||||||
TestFile,
|
|
||||||
concat_files,
|
|
||||||
generate_file,
|
|
||||||
generate_file_with_content,
|
|
||||||
get_file_hash,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.s3_gate
|
@pytest.mark.s3_gate
|
||||||
@pytest.mark.s3_gate_object
|
@pytest.mark.s3_gate_object
|
||||||
class TestS3GateObject:
|
class TestS3GateObject:
|
||||||
|
@ -356,9 +351,7 @@ class TestS3GateObject:
|
||||||
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
|
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
|
||||||
with reporter.step("Put several versions of object into bucket"):
|
with reporter.step("Put several versions of object into bucket"):
|
||||||
version_id_1 = s3_client.put_object(bucket, file_name_simple)
|
version_id_1 = s3_client.put_object(bucket, file_name_simple)
|
||||||
file_name_1 = generate_file_with_content(
|
file_name_1 = generate_file_with_content(simple_object_size.value, file_path=file_name_simple, content=version_2_content)
|
||||||
simple_object_size.value, file_path=file_name_simple, content=version_2_content
|
|
||||||
)
|
|
||||||
version_id_2 = s3_client.put_object(bucket, file_name_1)
|
version_id_2 = s3_client.put_object(bucket, file_name_1)
|
||||||
|
|
||||||
with reporter.step("Get first version of object"):
|
with reporter.step("Get first version of object"):
|
||||||
|
@ -444,9 +437,7 @@ class TestS3GateObject:
|
||||||
assert get_file_hash(con_file_1) == get_file_hash(file_name_1), "Hashes must be the same"
|
assert get_file_hash(con_file_1) == get_file_hash(file_name_1), "Hashes must be the same"
|
||||||
|
|
||||||
with reporter.step("Get object"):
|
with reporter.step("Get object"):
|
||||||
object_3_part_1 = s3_client.get_object(
|
object_3_part_1 = s3_client.get_object(bucket, file_name, object_range=[0, int(simple_object_size.value / 3)])
|
||||||
bucket, file_name, object_range=[0, int(simple_object_size.value / 3)]
|
|
||||||
)
|
|
||||||
object_3_part_2 = s3_client.get_object(
|
object_3_part_2 = s3_client.get_object(
|
||||||
bucket,
|
bucket,
|
||||||
file_name,
|
file_name,
|
||||||
|
@ -560,9 +551,7 @@ class TestS3GateObject:
|
||||||
elif list_type == "v2":
|
elif list_type == "v2":
|
||||||
list_obj = s3_client.list_objects_v2(bucket)
|
list_obj = s3_client.list_objects_v2(bucket)
|
||||||
assert len(list_obj) == 2, "bucket should have 2 objects"
|
assert len(list_obj) == 2, "bucket should have 2 objects"
|
||||||
assert (
|
assert list_obj.sort() == [file_name, file_name_2].sort(), f"bucket should have object key {file_name, file_name_2}"
|
||||||
list_obj.sort() == [file_name, file_name_2].sort()
|
|
||||||
), f"bucket should have object key {file_name, file_name_2}"
|
|
||||||
|
|
||||||
with reporter.step("Delete object"):
|
with reporter.step("Delete object"):
|
||||||
delete_obj = s3_client.delete_object(bucket, file_name)
|
delete_obj = s3_client.delete_object(bucket, file_name)
|
||||||
|
@ -695,13 +684,9 @@ class TestS3GateObject:
|
||||||
with pytest.raises(Exception, match=S3_BUCKET_DOES_NOT_ALLOW_ACL):
|
with pytest.raises(Exception, match=S3_BUCKET_DOES_NOT_ALLOW_ACL):
|
||||||
s3_client.put_object(bucket, file_path, grant_full_control=f"id={second_wallet_public_key}")
|
s3_client.put_object(bucket, file_path, grant_full_control=f"id={second_wallet_public_key}")
|
||||||
|
|
||||||
with reporter.step(
|
with reporter.step("[NEGATIVE] Put object with --grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers"):
|
||||||
"[NEGATIVE] Put object with --grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers"
|
|
||||||
):
|
|
||||||
with pytest.raises(Exception, match=S3_BUCKET_DOES_NOT_ALLOW_ACL):
|
with pytest.raises(Exception, match=S3_BUCKET_DOES_NOT_ALLOW_ACL):
|
||||||
s3_client.put_object(
|
s3_client.put_object(bucket, file_path, grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers")
|
||||||
bucket, file_path, grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers"
|
|
||||||
)
|
|
||||||
|
|
||||||
@allure.title("Put object with lock-mode (s3_client={s3_client})")
|
@allure.title("Put object with lock-mode (s3_client={s3_client})")
|
||||||
def test_s3_put_object_lock_mode(
|
def test_s3_put_object_lock_mode(
|
||||||
|
@ -727,9 +712,7 @@ class TestS3GateObject:
|
||||||
)
|
)
|
||||||
s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF")
|
s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF")
|
||||||
|
|
||||||
with reporter.step(
|
with reporter.step("Put new version of object with [--object-lock-mode COMPLIANCE] и [--object-lock-retain-until-date +3days]"):
|
||||||
"Put new version of object with [--object-lock-mode COMPLIANCE] и [--object-lock-retain-until-date +3days]"
|
|
||||||
):
|
|
||||||
date_obj = datetime.utcnow() + timedelta(days=2)
|
date_obj = datetime.utcnow() + timedelta(days=2)
|
||||||
generate_file_with_content(simple_object_size.value, file_path=file_path_1)
|
generate_file_with_content(simple_object_size.value, file_path=file_path_1)
|
||||||
s3_client.put_object(
|
s3_client.put_object(
|
||||||
|
@ -740,9 +723,7 @@ class TestS3GateObject:
|
||||||
)
|
)
|
||||||
s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF")
|
s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF")
|
||||||
|
|
||||||
with reporter.step(
|
with reporter.step("Put new version of object with [--object-lock-mode COMPLIANCE] и [--object-lock-retain-until-date +2days]"):
|
||||||
"Put new version of object with [--object-lock-mode COMPLIANCE] и [--object-lock-retain-until-date +2days]"
|
|
||||||
):
|
|
||||||
date_obj = datetime.utcnow() + timedelta(days=3)
|
date_obj = datetime.utcnow() + timedelta(days=3)
|
||||||
generate_file_with_content(simple_object_size.value, file_path=file_path_1)
|
generate_file_with_content(simple_object_size.value, file_path=file_path_1)
|
||||||
s3_client.put_object(
|
s3_client.put_object(
|
||||||
|
@ -809,9 +790,7 @@ class TestS3GateObject:
|
||||||
|
|
||||||
with reporter.step(f"Check all objects put in bucket_{i} successfully"):
|
with reporter.step(f"Check all objects put in bucket_{i} successfully"):
|
||||||
bucket_objects = s3_client.list_objects_v2(bucket)
|
bucket_objects = s3_client.list_objects_v2(bucket)
|
||||||
assert set(put_objects) == set(
|
assert set(put_objects) == set(bucket_objects), f"Expected all objects {put_objects} in objects list {bucket_objects}"
|
||||||
bucket_objects
|
|
||||||
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
|
|
||||||
|
|
||||||
with reporter.step("Delete some objects from bucket_1 one by one"):
|
with reporter.step("Delete some objects from bucket_1 one by one"):
|
||||||
objects_to_delete_b1 = random.sample(put_objects, k=max_delete_objects)
|
objects_to_delete_b1 = random.sample(put_objects, k=max_delete_objects)
|
||||||
|
@ -871,9 +850,7 @@ class TestS3GateObject:
|
||||||
with reporter.step("Check these are the same objects"):
|
with reporter.step("Check these are the same objects"):
|
||||||
for obj_key in objects:
|
for obj_key in objects:
|
||||||
got_object = s3_client.get_object(bucket, obj_key)
|
got_object = s3_client.get_object(bucket, obj_key)
|
||||||
assert get_file_hash(got_object) == get_file_hash(
|
assert get_file_hash(got_object) == get_file_hash(key_to_path.get(obj_key)), "Expected hashes are the same"
|
||||||
key_to_path.get(obj_key)
|
|
||||||
), "Expected hashes are the same"
|
|
||||||
obj_head = s3_client.head_object(bucket, obj_key)
|
obj_head = s3_client.head_object(bucket, obj_key)
|
||||||
assert obj_head.get("Metadata") == object_metadata, f"Metadata of object is {object_metadata}"
|
assert obj_head.get("Metadata") == object_metadata, f"Metadata of object is {object_metadata}"
|
||||||
object_grants = s3_client.get_object_acl(bucket, obj_key)
|
object_grants = s3_client.get_object_acl(bucket, obj_key)
|
||||||
|
@ -890,11 +867,7 @@ class TestS3GateObject:
|
||||||
|
|
||||||
with reporter.step("Put object"):
|
with reporter.step("Put object"):
|
||||||
test_file = generate_file(simple_object_size.value)
|
test_file = generate_file(simple_object_size.value)
|
||||||
obj_key = (
|
obj_key = "/" + "/".join(["".join(random.choices(key_characters_sample, k=5)) for _ in range(10)]) + "/test_file_1"
|
||||||
"/"
|
|
||||||
+ "/".join(["".join(random.choices(key_characters_sample, k=5)) for _ in range(10)])
|
|
||||||
+ "/test_file_1"
|
|
||||||
)
|
|
||||||
s3_client.put_object(bucket, test_file, obj_key)
|
s3_client.put_object(bucket, test_file, obj_key)
|
||||||
|
|
||||||
with reporter.step("Check object can be downloaded"):
|
with reporter.step("Check object can be downloaded"):
|
||||||
|
|
|
@ -1,12 +1,11 @@
|
||||||
import json
|
import json
|
||||||
import os
|
|
||||||
|
|
||||||
import allure
|
import allure
|
||||||
import pytest
|
import pytest
|
||||||
from botocore.exceptions import ClientError
|
from botocore.exceptions import ClientError
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
|
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
|
||||||
from frostfs_testlib.steps.cli.container import search_container_by_name
|
from frostfs_testlib.s3.interfaces import BucketContainerResolver
|
||||||
from frostfs_testlib.steps.s3 import s3_helper
|
from frostfs_testlib.steps.s3 import s3_helper
|
||||||
from frostfs_testlib.steps.storage_policy import get_simple_object_copies
|
from frostfs_testlib.steps.storage_policy import get_simple_object_copies
|
||||||
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
|
@ -15,13 +14,20 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.testing.test_control import expect_not_raises
|
from frostfs_testlib.testing.test_control import expect_not_raises
|
||||||
from frostfs_testlib.utils.file_utils import generate_file
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
|
from ....resources.common import S3_POLICY_FILE_LOCATION
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.s3_gate
|
@pytest.mark.s3_gate
|
||||||
@pytest.mark.parametrize("s3_policy", ["pytest_tests/resources/files/policy.json"], indirect=True)
|
@pytest.mark.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], indirect=True)
|
||||||
class TestS3GatePolicy(ClusterTestBase):
|
class TestS3GatePolicy(ClusterTestBase):
|
||||||
@allure.title("Bucket creation with retention policy applied (s3_client={s3_client})")
|
@allure.title("Bucket creation with retention policy applied (s3_client={s3_client})")
|
||||||
def test_s3_bucket_location(
|
def test_s3_bucket_location(
|
||||||
self, default_wallet: WalletInfo, s3_client: S3ClientWrapper, simple_object_size: ObjectSize
|
self,
|
||||||
|
default_wallet: WalletInfo,
|
||||||
|
s3_client: S3ClientWrapper,
|
||||||
|
simple_object_size: ObjectSize,
|
||||||
|
bucket_container_resolver: BucketContainerResolver,
|
||||||
):
|
):
|
||||||
file_path_1 = generate_file(simple_object_size.value)
|
file_path_1 = generate_file(simple_object_size.value)
|
||||||
file_name_1 = s3_helper.object_key_from_file_path(file_path_1)
|
file_name_1 = s3_helper.object_key_from_file_path(file_path_1)
|
||||||
|
@ -34,9 +40,7 @@ class TestS3GatePolicy(ClusterTestBase):
|
||||||
bucket_2 = s3_client.create_bucket(location_constraint="rep-3")
|
bucket_2 = s3_client.create_bucket(location_constraint="rep-3")
|
||||||
s3_helper.set_bucket_versioning(s3_client, bucket_2, VersioningStatus.ENABLED)
|
s3_helper.set_bucket_versioning(s3_client, bucket_2, VersioningStatus.ENABLED)
|
||||||
list_buckets = s3_client.list_buckets()
|
list_buckets = s3_client.list_buckets()
|
||||||
assert (
|
assert bucket_1 in list_buckets and bucket_2 in list_buckets, f"Expected two buckets {bucket_1, bucket_2}, got {list_buckets}"
|
||||||
bucket_1 in list_buckets and bucket_2 in list_buckets
|
|
||||||
), f"Expected two buckets {bucket_1, bucket_2}, got {list_buckets}"
|
|
||||||
|
|
||||||
with reporter.step("Check head buckets"):
|
with reporter.step("Check head buckets"):
|
||||||
with expect_not_raises():
|
with expect_not_raises():
|
||||||
|
@ -57,7 +61,7 @@ class TestS3GatePolicy(ClusterTestBase):
|
||||||
|
|
||||||
with reporter.step("Check object policy"):
|
with reporter.step("Check object policy"):
|
||||||
for cluster_node in self.cluster.cluster_nodes:
|
for cluster_node in self.cluster.cluster_nodes:
|
||||||
cid_1 = search_container_by_name(name=bucket_1, node=cluster_node)
|
cid_1 = bucket_container_resolver.resolve(cluster_node, bucket_1)
|
||||||
if cid_1:
|
if cid_1:
|
||||||
break
|
break
|
||||||
copies_1 = get_simple_object_copies(
|
copies_1 = get_simple_object_copies(
|
||||||
|
@ -69,7 +73,7 @@ class TestS3GatePolicy(ClusterTestBase):
|
||||||
)
|
)
|
||||||
assert copies_1 == 1
|
assert copies_1 == 1
|
||||||
for cluster_node in self.cluster.cluster_nodes:
|
for cluster_node in self.cluster.cluster_nodes:
|
||||||
cid_2 = search_container_by_name(name=bucket_2, node=cluster_node)
|
cid_2 = bucket_container_resolver.resolve(cluster_node, bucket_2)
|
||||||
if cid_2:
|
if cid_2:
|
||||||
break
|
break
|
||||||
copies_2 = get_simple_object_copies(
|
copies_2 = get_simple_object_copies(
|
||||||
|
@ -97,7 +101,6 @@ class TestS3GatePolicy(ClusterTestBase):
|
||||||
s3_client.get_bucket_policy(bucket)
|
s3_client.get_bucket_policy(bucket)
|
||||||
|
|
||||||
with reporter.step("Put new policy"):
|
with reporter.step("Put new policy"):
|
||||||
custom_policy = f"file://{os.getcwd()}/pytest_tests/resources/files/bucket_policy.json"
|
|
||||||
custom_policy = {
|
custom_policy = {
|
||||||
"Version": "2012-10-17",
|
"Version": "2012-10-17",
|
||||||
"Id": "aaaa-bbbb-cccc-dddd",
|
"Id": "aaaa-bbbb-cccc-dddd",
|
||||||
|
|
|
@ -11,6 +11,7 @@ from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
from frostfs_testlib.utils.file_utils import generate_file
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.s3_gate
|
@pytest.mark.s3_gate
|
||||||
@pytest.mark.s3_gate_tagging
|
@pytest.mark.s3_gate_tagging
|
||||||
class TestS3GateTagging:
|
class TestS3GateTagging:
|
||||||
|
|
|
@ -9,6 +9,7 @@ from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content, get_file_content
|
from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content, get_file_content
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.s3_gate
|
@pytest.mark.s3_gate
|
||||||
@pytest.mark.s3_gate_versioning
|
@pytest.mark.s3_gate_versioning
|
||||||
class TestS3GateVersioning:
|
class TestS3GateVersioning:
|
||||||
|
@ -77,9 +78,7 @@ class TestS3GateVersioning:
|
||||||
|
|
||||||
file_name = s3_client.get_object(bucket, obj_key)
|
file_name = s3_client.get_object(bucket, obj_key)
|
||||||
got_content = get_file_content(file_name)
|
got_content = get_file_content(file_name)
|
||||||
assert (
|
assert got_content == version_2_content, f"Expected object content is\n{version_2_content}\nGot\n{got_content}"
|
||||||
got_content == version_2_content
|
|
||||||
), f"Expected object content is\n{version_2_content}\nGot\n{got_content}"
|
|
||||||
|
|
||||||
@allure.title("Enable and disable versioning without object_lock (s3_client={s3_client})")
|
@allure.title("Enable and disable versioning without object_lock (s3_client={s3_client})")
|
||||||
def test_s3_version(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize):
|
def test_s3_version(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize):
|
||||||
|
@ -97,9 +96,7 @@ class TestS3GateVersioning:
|
||||||
actual_version = [version.get("VersionId") for version in object_version if version.get("Key") == file_name]
|
actual_version = [version.get("VersionId") for version in object_version if version.get("Key") == file_name]
|
||||||
assert actual_version == ["null"], f"Expected version is null in list-object-versions, got {object_version}"
|
assert actual_version == ["null"], f"Expected version is null in list-object-versions, got {object_version}"
|
||||||
object_0 = s3_client.head_object(bucket, file_name)
|
object_0 = s3_client.head_object(bucket, file_name)
|
||||||
assert (
|
assert object_0.get("VersionId") == "null", f"Expected version is null in head-object, got {object_0.get('VersionId')}"
|
||||||
object_0.get("VersionId") == "null"
|
|
||||||
), f"Expected version is null in head-object, got {object_0.get('VersionId')}"
|
|
||||||
|
|
||||||
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
|
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
|
||||||
|
|
||||||
|
|
|
@ -1,10 +1,9 @@
|
||||||
from datetime import datetime
|
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.credentials.interfaces import CredentialsProvider, User
|
from frostfs_testlib.credentials.interfaces import CredentialsProvider, User
|
||||||
from frostfs_testlib.storage.cluster import Cluster
|
from frostfs_testlib.storage.cluster import Cluster
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
|
from frostfs_testlib.utils import string_utils
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
@pytest.fixture(scope="module")
|
||||||
|
@ -15,12 +14,12 @@ def owner_wallet(default_wallet: WalletInfo) -> WalletInfo:
|
||||||
@pytest.fixture(scope="module")
|
@pytest.fixture(scope="module")
|
||||||
def user_wallet(credentials_provider: CredentialsProvider, cluster: Cluster) -> WalletInfo:
|
def user_wallet(credentials_provider: CredentialsProvider, cluster: Cluster) -> WalletInfo:
|
||||||
with reporter.step("Create user wallet which will use objects from owner via static session"):
|
with reporter.step("Create user wallet which will use objects from owner via static session"):
|
||||||
user = User(f"user_{hex(int(datetime.now().timestamp() * 1000000))}")
|
user = User(string_utils.unique_name("user-"))
|
||||||
return credentials_provider.GRPC.provide(user, cluster.cluster_nodes[0])
|
return credentials_provider.GRPC.provide(user, cluster.cluster_nodes[0])
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
@pytest.fixture(scope="module")
|
||||||
def stranger_wallet(credentials_provider: CredentialsProvider, cluster: Cluster) -> WalletInfo:
|
def stranger_wallet(credentials_provider: CredentialsProvider, cluster: Cluster) -> WalletInfo:
|
||||||
with reporter.step("Create stranger user wallet which should fail to obtain data"):
|
with reporter.step("Create stranger user wallet which should fail to obtain data"):
|
||||||
user = User(f"user_{hex(int(datetime.now().timestamp() * 1000000))}")
|
user = User(string_utils.unique_name("user-"))
|
||||||
return credentials_provider.GRPC.provide(user, cluster.cluster_nodes[0])
|
return credentials_provider.GRPC.provide(user, cluster.cluster_nodes[0])
|
||||||
|
|
|
@ -10,10 +10,10 @@ from frostfs_testlib.steps.session_token import create_session_token
|
||||||
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.utils import wallet_utils
|
|
||||||
from frostfs_testlib.utils.file_utils import generate_file
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.sanity
|
@pytest.mark.sanity
|
||||||
@pytest.mark.session_token
|
@pytest.mark.session_token
|
||||||
class TestDynamicObjectSession(ClusterTestBase):
|
class TestDynamicObjectSession(ClusterTestBase):
|
||||||
|
|
|
@ -3,12 +3,7 @@ import logging
|
||||||
import allure
|
import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.resources.error_patterns import (
|
from frostfs_testlib.resources.error_patterns import EXPIRED_SESSION_TOKEN, MALFORMED_REQUEST, OBJECT_ACCESS_DENIED, OBJECT_NOT_FOUND
|
||||||
EXPIRED_SESSION_TOKEN,
|
|
||||||
MALFORMED_REQUEST,
|
|
||||||
OBJECT_ACCESS_DENIED,
|
|
||||||
OBJECT_NOT_FOUND,
|
|
||||||
)
|
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.steps.cli.container import create_container
|
from frostfs_testlib.steps.cli.container import create_container
|
||||||
from frostfs_testlib.steps.cli.object import (
|
from frostfs_testlib.steps.cli.object import (
|
||||||
|
@ -138,6 +133,7 @@ def static_sessions(
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.static_session
|
@pytest.mark.static_session
|
||||||
class TestObjectStaticSession(ClusterTestBase):
|
class TestObjectStaticSession(ClusterTestBase):
|
||||||
@allure.title("Read operations with static session (method={method_under_test.__name__}, obj_size={object_size})")
|
@allure.title("Read operations with static session (method={method_under_test.__name__}, obj_size={object_size})")
|
||||||
|
|
|
@ -8,6 +8,7 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.nightly
|
||||||
@pytest.mark.static_session_container
|
@pytest.mark.static_session_container
|
||||||
class TestSessionTokenContainer(ClusterTestBase):
|
class TestSessionTokenContainer(ClusterTestBase):
|
||||||
@pytest.fixture(scope="module")
|
@pytest.fixture(scope="module")
|
||||||
|
|
0
pytest_tests/testsuites/shard/__init__.py
Normal file
0
pytest_tests/testsuites/shard/__init__.py
Normal file
|
@ -1,13 +1,13 @@
|
||||||
import json
|
import json
|
||||||
|
import os
|
||||||
|
|
||||||
import allure
|
import allure
|
||||||
import pytest
|
import pytest
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.cli import FrostfsCli
|
from frostfs_testlib.cli import FrostfsCli
|
||||||
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
|
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
|
||||||
from frostfs_testlib.resources.wellknown_acl import EACL_PUBLIC_READ_WRITE
|
from frostfs_testlib.shell.interfaces import Shell
|
||||||
from frostfs_testlib.steps.cli.container import create_container, delete_container
|
from frostfs_testlib.steps.cli.object import get_object, get_object_nodes, put_object
|
||||||
from frostfs_testlib.steps.cli.object import delete_object, get_object, get_object_nodes, put_object
|
|
||||||
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode
|
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode
|
||||||
from frostfs_testlib.storage.controllers import ClusterStateController, ShardsWatcher
|
from frostfs_testlib.storage.controllers import ClusterStateController, ShardsWatcher
|
||||||
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
|
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
|
||||||
|
@ -17,64 +17,86 @@ from frostfs_testlib.testing import parallel, wait_for_success
|
||||||
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
||||||
from frostfs_testlib.utils.file_utils import generate_file
|
from frostfs_testlib.utils.file_utils import generate_file
|
||||||
|
|
||||||
|
from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container
|
||||||
|
|
||||||
|
|
||||||
|
def set_shard_rw_mode(node: ClusterNode):
|
||||||
|
watcher = ShardsWatcher(node)
|
||||||
|
shards = watcher.get_shards()
|
||||||
|
for shard in shards:
|
||||||
|
watcher.set_shard_mode(shard["shard_id"], mode="read-write")
|
||||||
|
watcher.await_for_all_shards_status(status="read-write")
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture()
|
||||||
|
@allure.title("Revert all shards mode")
|
||||||
|
def revert_all_shards_mode(cluster: Cluster) -> None:
|
||||||
|
yield
|
||||||
|
parallel(set_shard_rw_mode, cluster.cluster_nodes)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture()
|
||||||
|
def object_id(client_shell: Shell, cluster: Cluster, container: str, default_wallet: WalletInfo, max_object_size: int) -> str:
|
||||||
|
with reporter.step("Create container, and put object"):
|
||||||
|
file = generate_file(round(max_object_size * 0.8))
|
||||||
|
oid = put_object(default_wallet, file, container, client_shell, cluster.default_rpc_endpoint)
|
||||||
|
return oid
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture()
|
||||||
|
def node_with_object(cluster: Cluster, container: str, object_id: str) -> ClusterNode:
|
||||||
|
with reporter.step("Search node with object"):
|
||||||
|
nodes = get_object_nodes(cluster, container, object_id, cluster.cluster_nodes[0])
|
||||||
|
|
||||||
|
return nodes[0]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture()
|
||||||
|
@wait_for_success(180, 30, title="Search object in system")
|
||||||
|
def object_path_on_node(object_id: str, container: str, node_with_object: ClusterNode) -> str:
|
||||||
|
oid_path = f"{object_id[0]}/{object_id[1]}/{object_id[2]}/{object_id[3]}"
|
||||||
|
object_path = None
|
||||||
|
|
||||||
|
with reporter.step("Search object file"):
|
||||||
|
node_shell = node_with_object.storage_node.host.get_shell()
|
||||||
|
data_path = node_with_object.storage_node.get_data_directory()
|
||||||
|
all_datas = node_shell.exec(f"ls -la {data_path}/data | awk '{{ print $9 }}'").stdout.strip()
|
||||||
|
for data_dir in all_datas.replace(".", "").strip().split("\n"):
|
||||||
|
check_dir = node_shell.exec(f" [ -d {data_path}/data/{data_dir}/data/{oid_path} ] && echo 1 || echo 0").stdout
|
||||||
|
if "1" in check_dir:
|
||||||
|
object_path = f"{data_path}/data/{data_dir}/data/{oid_path}"
|
||||||
|
object_name = f"{object_id[4:]}.{container}"
|
||||||
|
break
|
||||||
|
|
||||||
|
assert object_path is not None, f"{object_id} object not found in directory - {data_path}/data"
|
||||||
|
return os.path.join(object_path, object_name)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture()
|
||||||
|
def erroneous_object_id(object_id: str, object_path_on_node: str, node_with_object: ClusterNode):
|
||||||
|
with reporter.step("Block read file"):
|
||||||
|
node_with_object.host.get_shell().exec(f"chmod a-r {object_path_on_node}")
|
||||||
|
|
||||||
|
yield object_id
|
||||||
|
|
||||||
|
with reporter.step("Restore file access"):
|
||||||
|
node_with_object.host.get_shell().exec(f"chmod +r {object_path_on_node}")
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture()
def change_config_storage(cluster_state_controller: ClusterStateController):
    """Lower the shard read-only error threshold to 5 on all storage nodes; revert on teardown."""
    with reporter.step("Change threshold error shards"):
        cluster_state_controller.manager(ConfigStateManager).set_on_all_nodes(
            service_type=StorageNode, values={"storage:shard_ro_error_threshold": "5"}
        )
    yield
    with reporter.step("Restore threshold error shards"):
        cluster_state_controller.manager(ConfigStateManager).revert_all()
|
@pytest.mark.nightly
|
||||||
@pytest.mark.shard
|
@pytest.mark.shard
|
||||||
class TestControlShard(ClusterTestBase):
|
class TestControlShard(ClusterTestBase):
|
||||||
@staticmethod
|
|
||||||
@wait_for_success(180, 30)
|
|
||||||
def get_object_path_and_name_file(oid: str, cid: str, node: ClusterNode) -> tuple[str, str]:
|
|
||||||
oid_path = f"{oid[0]}/{oid[1]}/{oid[2]}/{oid[3]}"
|
|
||||||
object_path = None
|
|
||||||
|
|
||||||
with reporter.step("Search object file"):
|
|
||||||
node_shell = node.storage_node.host.get_shell()
|
|
||||||
data_path = node.storage_node.get_data_directory()
|
|
||||||
all_datas = node_shell.exec(f"ls -la {data_path}/data | awk '{{ print $9 }}'").stdout.strip()
|
|
||||||
for data_dir in all_datas.replace(".", "").strip().split("\n"):
|
|
||||||
check_dir = node_shell.exec(f" [ -d {data_path}/data/{data_dir}/data/{oid_path} ] && echo 1 || echo 0").stdout
|
|
||||||
if "1" in check_dir:
|
|
||||||
object_path = f"{data_path}/data/{data_dir}/data/{oid_path}"
|
|
||||||
object_name = f"{oid[4:]}.{cid}"
|
|
||||||
break
|
|
||||||
|
|
||||||
assert object_path is not None, f"{oid} object not found in directory - {data_path}/data"
|
|
||||||
return object_path, object_name
|
|
||||||
|
|
||||||
def set_shard_rw_mode(self, node: ClusterNode):
|
|
||||||
watcher = ShardsWatcher(node)
|
|
||||||
shards = watcher.get_shards()
|
|
||||||
for shard in shards:
|
|
||||||
watcher.set_shard_mode(shard["shard_id"], mode="read-write")
|
|
||||||
watcher.await_for_all_shards_status(status="read-write")
|
|
||||||
|
|
||||||
    @pytest.fixture()
    @allure.title("Revert all shards mode")
    def revert_all_shards_mode(self) -> None:
        """On teardown, put every shard on every cluster node back into read-write mode."""
        yield
        parallel(self.set_shard_rw_mode, self.cluster.cluster_nodes)
||||||
    @pytest.fixture()
    def oid_cid_node(self, default_wallet: WalletInfo, max_object_size: int) -> tuple[str, str, ClusterNode]:
        """Create a single-replica container with one object; yield (oid, cid, holder node).

        Teardown restores read access to the object file (tests chmod it away),
        then deletes the object and the container.
        """
        with reporter.step("Create container, and put object"):
            cid = create_container(
                wallet=default_wallet,
                shell=self.shell,
                endpoint=self.cluster.default_rpc_endpoint,
                rule="REP 1 CBF 1",
                basic_acl=EACL_PUBLIC_READ_WRITE,
            )
            # Keep the payload under the max object size.
            file = generate_file(round(max_object_size * 0.8))
            oid = put_object(wallet=default_wallet, path=file, cid=cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
        with reporter.step("Search node with object"):
            nodes = get_object_nodes(cluster=self.cluster, cid=cid, oid=oid, alive_node=self.cluster.cluster_nodes[0])

        yield oid, cid, nodes[0]

        # Teardown: re-enable read access before cleanup so deletion can proceed.
        object_path, object_name = self.get_object_path_and_name_file(oid, cid, nodes[0])
        nodes[0].host.get_shell().exec(f"chmod +r {object_path}/{object_name}")
        delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
        delete_container(wallet=default_wallet, cid=cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
||||||
@staticmethod
|
@staticmethod
|
||||||
def get_shards_from_cli(node: StorageNode) -> list[Shard]:
|
def get_shards_from_cli(node: StorageNode) -> list[Shard]:
|
||||||
wallet_path = node.get_remote_wallet_path()
|
wallet_path = node.get_remote_wallet_path()
|
||||||
|
@ -93,16 +115,6 @@ class TestControlShard(ClusterTestBase):
|
||||||
)
|
)
|
||||||
return [Shard.from_object(shard) for shard in json.loads(result.stdout.split(">", 1)[1])]
|
return [Shard.from_object(shard) for shard in json.loads(result.stdout.split(">", 1)[1])]
|
||||||
|
|
||||||
    @pytest.fixture()
    def change_config_storage(self, cluster_state_controller: ClusterStateController):
        """Lower the shard read-only error threshold to 5 on all nodes; revert on teardown."""
        with reporter.step("Change threshold error shards"):
            cluster_state_controller.manager(ConfigStateManager).set_on_all_nodes(
                service_type=StorageNode, values={"storage:shard_ro_error_threshold": "5"}
            )
        yield
        with reporter.step("Restore threshold error shards"):
            cluster_state_controller.manager(ConfigStateManager).revert_all()
@allure.title("All shards are available")
|
@allure.title("All shards are available")
|
||||||
def test_control_shard(self, cluster: Cluster):
|
def test_control_shard(self, cluster: Cluster):
|
||||||
for storage_node in cluster.storage_nodes:
|
for storage_node in cluster.storage_nodes:
|
||||||
|
@ -113,31 +125,25 @@ class TestControlShard(ClusterTestBase):
|
||||||
|
|
||||||
    @allure.title("Shard become read-only when errors exceeds threshold")
    @pytest.mark.failover
    @requires_container(PUBLIC_WITH_POLICY("REP 1 CBF 1", short_name="REP 1"))
    def test_shard_errors(
        self,
        default_wallet: WalletInfo,
        container: str,
        node_with_object: ClusterNode,
        erroneous_object_id: str,
        object_path_on_node: str,
        change_config_storage: None,
        revert_all_shards_mode: None,
    ):
        """After repeated read errors on one object, its shard must switch to read-only."""
        with reporter.step("Get object, expect 6 errors"):
            # Threshold is set to 5 by change_config_storage, so 6 failed reads should trip it.
            for _ in range(6):
                with pytest.raises(RuntimeError):
                    get_object(default_wallet, container, erroneous_object_id, self.shell, node_with_object.storage_node.get_rpc_endpoint())
        with reporter.step("Check shard status"):
            # Find the shard whose blobstor path holds the broken object and assert its mode.
            for shard in ShardsWatcher(node_with_object).get_shards():
                if shard["blobstor"][1]["path"] in object_path_on_node:
                    with reporter.step(f"Shard {shard['shard_id']} should be in read-only mode"):
                        assert shard["mode"] == "read-only"
                    break
||||||
|
|
|
@ -31,7 +31,17 @@ class TestLogs:
|
||||||
if not os.path.exists(logs_dir):
|
if not os.path.exists(logs_dir):
|
||||||
os.makedirs(logs_dir)
|
os.makedirs(logs_dir)
|
||||||
|
|
||||||
issues_regex = r"\bpanic\b|\boom\b|too many|insufficient funds|insufficient amount of gas|cannot assign requested address|\bunable to process\b"
|
regexes = [
|
||||||
|
r"\bpanic\b",
|
||||||
|
r"\boom\b",
|
||||||
|
r"too many",
|
||||||
|
r"insufficient funds",
|
||||||
|
r"insufficient amount of gas",
|
||||||
|
r"cannot assign requested address",
|
||||||
|
r"\bunable to process\b",
|
||||||
|
r"\bmaximum number of subscriptions is reached\b",
|
||||||
|
]
|
||||||
|
issues_regex = "|".join(regexes)
|
||||||
exclude_filter = r"too many requests"
|
exclude_filter = r"too many requests"
|
||||||
log_level_priority = "3" # will include 0-3 priority logs (0: emergency 1: alerts 2: critical 3: errors)
|
log_level_priority = "3" # will include 0-3 priority logs (0: emergency 1: alerts 2: critical 3: errors)
|
||||||
|
|
||||||
|
|
|
@ -1,8 +1,7 @@
|
||||||
allure-pytest==2.13.2
|
allure-pytest==2.13.2
|
||||||
allure-python-commons==2.13.2
|
allure-python-commons==2.13.2
|
||||||
base58==2.1.0
|
base58==2.1.0
|
||||||
boto3==1.16.33
|
boto3==1.35.30
|
||||||
botocore==1.19.33
|
|
||||||
configobj==5.0.6
|
configobj==5.0.6
|
||||||
neo-mamba==1.0.0
|
neo-mamba==1.0.0
|
||||||
pexpect==4.8.0
|
pexpect==4.8.0
|
||||||
|
|
Loading…
Reference in a new issue