Compare commits


No commits in common. "feature--11044" and "master" have entirely different histories.

66 changed files with 811 additions and 3394 deletions

View file

@@ -1,109 +0,0 @@
-hosts:
-  - address: localhost
-    hostname: localhost
-    attributes:
-      sudo_shell: false
-    plugin_name: docker
-    healthcheck_plugin_name: basic
-    attributes:
-      skip_readiness_check: True
-      force_transactions: True
-    services:
-      - name: frostfs-storage_01
-        attributes:
-          container_name: s01
-          config_path: /etc/frostfs/storage/config.yml
-          wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
-          local_wallet_config_path: ./TemporaryDir/empty-password.yml
-          local_wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
-          wallet_password: ""
-          volume_name: storage_storage_s01
-          endpoint_data0: s01.frostfs.devenv:8080
-          control_endpoint: s01.frostfs.devenv:8081
-          un_locode: "RU MOW"
-      - name: frostfs-storage_02
-        attributes:
-          container_name: s02
-          config_path: /etc/frostfs/storage/config.yml
-          wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
-          local_wallet_config_path: ./TemporaryDir/empty-password.yml
-          local_wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
-          wallet_password: ""
-          volume_name: storage_storage_s02
-          endpoint_data0: s02.frostfs.devenv:8080
-          control_endpoint: s02.frostfs.devenv:8081
-          un_locode: "RU LED"
-      - name: frostfs-storage_03
-        attributes:
-          container_name: s03
-          config_path: /etc/frostfs/storage/config.yml
-          wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
-          local_wallet_config_path: ./TemporaryDir/empty-password.yml
-          local_wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
-          wallet_password: ""
-          volume_name: storage_storage_s03
-          endpoint_data0: s03.frostfs.devenv:8080
-          control_endpoint: s03.frostfs.devenv:8081
-          un_locode: "SE STO"
-      - name: frostfs-storage_04
-        attributes:
-          container_name: s04
-          config_path: /etc/frostfs/storage/config.yml
-          wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
-          local_wallet_config_path: ./TemporaryDir/empty-password.yml
-          local_wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
-          wallet_password: ""
-          volume_name: storage_storage_s04
-          endpoint_data0: s04.frostfs.devenv:8080
-          control_endpoint: s04.frostfs.devenv:8081
-          un_locode: "FI HEL"
-      - name: frostfs-s3_01
-        attributes:
-          container_name: s3_gate
-          config_path: ../frostfs-dev-env/services/s3_gate/.s3.env
-          wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json
-          local_wallet_config_path: ./TemporaryDir/password-s3.yml
-          local_wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json
-          wallet_password: "s3"
-          endpoint_data0: https://s3.frostfs.devenv:8080
-      - name: frostfs-http_01
-        attributes:
-          container_name: http_gate
-          config_path: ../frostfs-dev-env/services/http_gate/.http.env
-          wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json
-          local_wallet_config_path: ./TemporaryDir/password-other.yml
-          local_wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json
-          wallet_password: "one"
-          endpoint_data0: http://http.frostfs.devenv
-      - name: frostfs-ir_01
-        attributes:
-          container_name: ir01
-          config_path: ../frostfs-dev-env/services/ir/.ir.env
-          wallet_path: ../frostfs-dev-env/services/ir/az.json
-          local_wallet_config_path: ./TemporaryDir/password-other.yml
-          local_wallet_path: ../frostfs-dev-env/services/ir/az.json
-          wallet_password: "one"
-      - name: neo-go_01
-        attributes:
-          container_name: morph_chain
-          config_path: ../frostfs-dev-env/services/morph_chain/protocol.privnet.yml
-          wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json
-          local_wallet_config_path: ./TemporaryDir/password-other.yml
-          local_wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json
-          wallet_password: "one"
-          endpoint_internal0: http://morph-chain.frostfs.devenv:30333
-      - name: main-chain_01
-        attributes:
-          container_name: main_chain
-          config_path: ../frostfs-dev-env/services/chain/protocol.privnet.yml
-          wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json
-          local_wallet_config_path: ./TemporaryDir/password-other.yml
-          local_wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json
-          wallet_password: "one"
-          endpoint_internal0: http://main-chain.frostfs.devenv:30333
-      - name: coredns_01
-        attributes:
-          container_name: coredns
-clis:
-  - name: frostfs-cli
-    exec_path: frostfs-cli
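
The deleted YAML above is the dev-env hosting description (hosts, their docker containers, wallets and endpoints) that the testlib Hosting layer consumes. A minimal sketch of loading such a file, mirroring the `hosting` fixture that is also deleted further down in this compare (the file name here is illustrative; the deleted fixture read it from HOSTING_CONFIG_FILE):

    import yaml

    from frostfs_testlib.hosting.hosting import Hosting

    # File name is an assumption for the sketch.
    with open(".devenv.hosting.yaml", "r") as file:
        hosting_config = yaml.full_load(file)

    hosting = Hosting()
    hosting.configure(hosting_config)  # builds hosts from the plugin_name/services entries above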

View file

@@ -1 +0,0 @@
-* @JuliaKovshova @abereziny @d.zayakin @anikeev-yadro @anurindm @ylukoyan @i.niyazov

View file

@@ -89,7 +89,4 @@ push = false
 filterwarnings = [
     "ignore:Blowfish has been deprecated:cryptography.utils.CryptographyDeprecationWarning",
 ]
 testpaths = ["tests"]
-
-[project.entry-points.pytest11]
-testlib = "frostfs_testlib"
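
Dropping the [project.entry-points.pytest11] table means installing the package no longer auto-registers it as a pytest plugin (pytest11 is the standard entry-point group that pytest scans at startup). Consumers can opt in explicitly instead; a one-line sketch for a downstream conftest.py:

    # conftest.py — explicit opt-in, equivalent to the removed entry point
    pytest_plugins = ["frostfs_testlib"]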

View file

@@ -1,3 +1 @@
 __version__ = "2.0.1"
-
-from .fixtures import configure_testlib, hosting, temp_directory

View file

@@ -1,5 +1,5 @@
 from frostfs_testlib.analytics import test_case
 from frostfs_testlib.analytics.test_case import TestCasePriority
 from frostfs_testlib.analytics.test_collector import TestCase, TestCaseCollector
-from frostfs_testlib.analytics.test_exporter import TСExporter
+from frostfs_testlib.analytics.test_exporter import TestExporter
 from frostfs_testlib.analytics.testrail_exporter import TestrailExporter

View file

@@ -3,8 +3,7 @@ from abc import ABC, abstractmethod
 from frostfs_testlib.analytics.test_collector import TestCase
 
 
-# TODO: REMOVE ME
-class TСExporter(ABC):
+class TestExporter(ABC):
     test_cases_cache = []
     test_suites_cache = []
@@ -47,7 +46,9 @@ class TСExporter(ABC):
         """
 
     @abstractmethod
-    def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None:
+    def update_test_case(
+        self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section
+    ) -> None:
         """
         Update test case in TMS
         """
@@ -59,7 +60,9 @@ class TСExporter(ABC):
         for test_case in test_cases:
             test_suite = self.get_or_create_test_suite(test_case.suite_name)
-            test_section = self.get_or_create_suite_section(test_suite, test_case.suite_section_name)
+            test_section = self.get_or_create_suite_section(
+                test_suite, test_case.suite_section_name
+            )
             test_case_in_tms = self.search_test_case_id(test_case.id)
             steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()]
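
For orientation: TestExporter (ex-TСExporter — note the Cyrillic "С" in the old name, which is what the rename fixes) is the abstract base whose export_test_cases loop appears above. A minimal concrete exporter sketched against the methods visible in this diff; create_test_case is assumed to be the counterpart of update_test_case, and the in-memory "TMS" is hypothetical:

    from frostfs_testlib.analytics.test_collector import TestCase
    from frostfs_testlib.analytics.test_exporter import TestExporter


    class DictExporter(TestExporter):
        # Hypothetical in-memory TMS stub, for illustration only.
        def __init__(self) -> None:
            self.tms: dict[str, dict] = {}

        def search_test_case_id(self, test_case_id: str) -> object:
            return self.tms.get(test_case_id)

        def get_or_create_test_suite(self, test_suite_name: str) -> object:
            return {"name": test_suite_name}

        def get_or_create_suite_section(self, test_suite, section_name) -> object:
            return {"name": section_name}

        def create_test_case(self, test_case: TestCase, test_suite, test_suite_section) -> None:
            self.tms[test_case.id] = {"steps": test_case.steps}

        def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None:
            self.tms[test_case.id] = {"steps": test_case.steps}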

View file

@@ -1,10 +1,10 @@
 from testrail_api import TestRailAPI
 
 from frostfs_testlib.analytics.test_collector import TestCase
-from frostfs_testlib.analytics.test_exporter import TСExporter
+from frostfs_testlib.analytics.test_exporter import TestExporter
 
 
-class TestrailExporter(TСExporter):
+class TestrailExporter(TestExporter):
     def __init__(
         self,
         tr_url: str,
@@ -62,13 +62,19 @@ class TestrailExporter(TСExporter):
         It's help do not call TMS each time then we search test case
         """
         for test_suite in self.test_suites_cache:
-            self.test_cases_cache.extend(self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"]))
+            self.test_cases_cache.extend(
+                self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"])
+            )
 
     def search_test_case_id(self, test_case_id: str) -> object:
         """
         Find test cases in TestRail (cache) by ID
         """
-        test_cases = [test_case for test_case in self.test_cases_cache if test_case["custom_autotest_name"] == test_case_id]
+        test_cases = [
+            test_case
+            for test_case in self.test_cases_cache
+            if test_case["custom_autotest_name"] == test_case_id
+        ]
 
         if len(test_cases) > 1:
             raise RuntimeError(f"Too many results found in test rail for id {test_case_id}")
@@ -81,7 +87,9 @@ class TestrailExporter(TСExporter):
         """
         Get suite name with exact name from Testrail or create if not exist
         """
-        test_rail_suites = [suite for suite in self.test_suites_cache if suite["name"] == test_suite_name]
+        test_rail_suites = [
+            suite for suite in self.test_suites_cache if suite["name"] == test_suite_name
+        ]
 
         if not test_rail_suites:
             test_rail_suite = self.api.suites.add_suite(
@@ -94,13 +102,17 @@ class TestrailExporter(TСExporter):
         elif len(test_rail_suites) == 1:
             return test_rail_suites.pop()
         else:
-            raise RuntimeError(f"Too many results found in test rail for suite name {test_suite_name}")
+            raise RuntimeError(
+                f"Too many results found in test rail for suite name {test_suite_name}"
+            )
 
     def get_or_create_suite_section(self, test_rail_suite, section_name) -> object:
         """
         Get suite section with exact name from Testrail or create new one if not exist
         """
-        test_rail_sections = [section for section in test_rail_suite["sections"] if section["name"] == section_name]
+        test_rail_sections = [
+            section for section in test_rail_suite["sections"] if section["name"] == section_name
+        ]
 
         if not test_rail_sections:
             test_rail_section = self.api.sections.add_section(
@@ -116,7 +128,9 @@ class TestrailExporter(TСExporter):
         elif len(test_rail_sections) == 1:
             return test_rail_sections.pop()
         else:
-            raise RuntimeError(f"Too many results found in test rail for section name {section_name}")
+            raise RuntimeError(
+                f"Too many results found in test rail for section name {section_name}"
+            )
 
     def prepare_request_body(self, test_case: TestCase, test_suite, test_suite_section) -> dict:
         """
@@ -150,7 +164,9 @@ class TestrailExporter(TСExporter):
         self.api.cases.add_case(**request_body)
 
-    def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None:
+    def update_test_case(
+        self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section
+    ) -> None:
         """
         Update test case in Testrail
         """

View file

@@ -110,7 +110,7 @@ class FrostfsAdmMorph(CliCommand):
             **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )
 
-    def dump_hashes(self, rpc_endpoint: str, domain: Optional[str] = None) -> CommandResult:
+    def dump_hashes(self, rpc_endpoint: str) -> CommandResult:
         """Dump deployed contract hashes.
 
         Args:
@@ -350,129 +350,3 @@ class FrostfsAdmMorph(CliCommand):
                 if param not in ["self", "node_netmap_keys"]
             },
         )
-
-    def add_rule(
-        self,
-        endpoint: str,
-        chain_id: str,
-        target_name: str,
-        target_type: str,
-        rule: Optional[list[str]] = None,
-        path: Optional[str] = None,
-        chain_id_hex: Optional[bool] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Drop objects from the node's local storage
-
-        Args:
-            address: Address of wallet account
-            chain-id: Assign ID to the parsed chain
-            chain-id-hex: Flag to parse chain ID as hex
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            path: Path to encoded chain in JSON or binary format
-            rule: Rule statement
-            target-name: Resource name in APE resource name format
-            target-type: Resource type(container/namespace)
-            timeout: Timeout for an operation (default 15s)
-            wallet: Path to the wallet or binary key
-
-        Returns:
-            Command`s result.
-        """
-        return self._execute(
-            "control add-rule",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def get_rule(
-        self,
-        endpoint: str,
-        chain_id: str,
-        target_name: str,
-        target_type: str,
-        chain_id_hex: Optional[bool] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Drop objects from the node's local storage
-
-        Args:
-            address string Address of wallet account
-            chain-id string Chain id
-            chain-id-hex Flag to parse chain ID as hex
-            endpoint string Remote node control address (as 'multiaddr' or '<host>:<port>')
-            target-name string Resource name in APE resource name format
-            target-type string Resource type(container/namespace)
-            timeout duration Timeout for an operation (default 15s)
-            wallet string Path to the wallet or binary key
-
-        Returns:
-            Command`s result.
-        """
-        return self._execute(
-            "control get-rule",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def list_rules(
-        self,
-        target_type: str,
-        target_name: Optional[str] = None,
-        rpc_endpoint: Optional[str] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Drop objects from the node's local storage
-
-        Args:
-            address: Address of wallet account
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            target-name: Resource name in APE resource name format
-            target-type: Resource type(container/namespace)
-            timeout: Timeout for an operation (default 15s)
-            wallet: Path to the wallet or binary key
-
-        Returns:
-            Command`s result.
-        """
-        return self._execute(
-            "morph ape list-rule-chains",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def remove_rule(
-        self,
-        endpoint: str,
-        chain_id: str,
-        target_name: str,
-        target_type: str,
-        all: Optional[bool] = None,
-        chain_id_hex: Optional[bool] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Drop objects from the node's local storage
-
-        Args:
-            address: Address of wallet account
-            all: Remove all chains
-            chain-id: Assign ID to the parsed chain
-            chain-id-hex: Flag to parse chain ID as hex
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            target-name: Resource name in APE resource name format
-            target-type: Resource type(container/namespace)
-            timeout: Timeout for an operation (default 15s)
-            wallet: Path to the wallet or binary key
-
-        Returns:
-            Command`s result.
-        """
-        return self._execute(
-            "control remove-rule",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
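
One side of this compare lets dump_hashes filter by an NNS domain, the other takes only the endpoint, so call sites must match. A sketch of the two invocations (the frostfs_adm facade object, its morph attribute, and the argument values are placeholders):

    # with the domain parameter (left-hand side of the hunk)
    frostfs_adm.morph.dump_hashes(rpc_endpoint="http://morph-chain.frostfs.devenv:30333", domain="frostfs")
    # without it (right-hand side)
    frostfs_adm.morph.dump_hashes(rpc_endpoint="http://morph-chain.frostfs.devenv:30333")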

View file

@@ -1,70 +0,0 @@
-from typing import Optional
-
-from frostfs_testlib.cli.cli_command import CliCommand
-from frostfs_testlib.shell import CommandResult
-
-
-class FrostfsCliApeManager(CliCommand):
-    """Operations with APE manager."""
-
-    def add(
-        self,
-        rpc_endpoint: str,
-        chain_id: Optional[str] = None,
-        chain_id_hex: Optional[str] = None,
-        path: Optional[str] = None,
-        rule: Optional[str] | Optional[list[str]] = None,
-        target_name: Optional[str] = None,
-        target_type: Optional[str] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Add rule chain for a target."""
-
-        return self._execute(
-            "ape-manager add",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def list(
-        self,
-        rpc_endpoint: str,
-        target_name: Optional[str] = None,
-        target_type: Optional[str] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Generate APE override by target and APE chains. Util command.
-
-        Generated APE override can be dumped to a file in JSON format that is passed to
-        "create" command.
-        """
-
-        return self._execute(
-            "ape-manager list",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def remove(
-        self,
-        rpc_endpoint: str,
-        chain_id: Optional[str] = None,
-        chain_id_hex: Optional[str] = None,
-        target_name: Optional[str] = None,
-        target_type: Optional[str] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Generate APE override by target and APE chains. Util command.
-
-        Generated APE override can be dumped to a file in JSON format that is passed to
-        "create" command.
-        """
-
-        return self._execute(
-            "ape-manager remove",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )

View file

@@ -1,54 +0,0 @@
-from typing import Optional
-
-from frostfs_testlib.cli.cli_command import CliCommand
-from frostfs_testlib.shell import CommandResult
-
-
-class FrostfsCliBearer(CliCommand):
-    def create(
-        self,
-        rpc_endpoint: str,
-        out: str,
-        issued_at: Optional[str] = None,
-        expire_at: Optional[str] = None,
-        not_valid_before: Optional[str] = None,
-        ape: Optional[str] = None,
-        eacl: Optional[str] = None,
-        owner: Optional[str] = None,
-        json: Optional[bool] = False,
-        impersonate: Optional[bool] = False,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-    ) -> CommandResult:
-        """Create bearer token.
-
-        All epoch flags can be specified relative to the current epoch with the +n syntax.
-        In this case --rpc-endpoint flag should be specified and the epoch in bearer token
-        is set to current epoch + n.
-        """
-        return self._execute(
-            "bearer create",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def generate_ape_override(
-        self,
-        chain_id: Optional[str] = None,
-        chain_id_hex: Optional[str] = None,
-        cid: Optional[str] = None,
-        output: Optional[str] = None,
-        path: Optional[str] = None,
-        rule: Optional[str] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-    ) -> CommandResult:
-        """Generate APE override by target and APE chains. Util command.
-
-        Generated APE override can be dumped to a file in JSON format that is passed to
-        "create" command.
-        """
-
-        return self._execute(
-            "bearer generate-ape-override",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )

View file

@@ -2,8 +2,6 @@ from typing import Optional
 from frostfs_testlib.cli.frostfs_cli.accounting import FrostfsCliAccounting
 from frostfs_testlib.cli.frostfs_cli.acl import FrostfsCliACL
-from frostfs_testlib.cli.frostfs_cli.ape_manager import FrostfsCliApeManager
-from frostfs_testlib.cli.frostfs_cli.bearer import FrostfsCliBearer
 from frostfs_testlib.cli.frostfs_cli.container import FrostfsCliContainer
 from frostfs_testlib.cli.frostfs_cli.control import FrostfsCliControl
 from frostfs_testlib.cli.frostfs_cli.netmap import FrostfsCliNetmap
@@ -43,5 +41,3 @@ class FrostfsCli:
         self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file)
         self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file)
         self.control = FrostfsCliControl(shell, frostfs_cli_exec_path, config=config_file)
-        self.bearer = FrostfsCliBearer(shell, frostfs_cli_exec_path, config=config_file)
-        self.ape_manager = FrostfsCliApeManager(shell, frostfs_cli_exec_path, config=config_file)
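
After this change the FrostfsCli facade no longer exposes the bearer and ape_manager wrappers; code that shelled out through them has to be reworked. Construction itself is unchanged (a sketch; the shell instance and paths are placeholders, the constructor shape is taken from the hunk above):

    from frostfs_testlib.cli import FrostfsCli
    from frostfs_testlib.shell import LocalShell

    cli = FrostfsCli(LocalShell(), "frostfs-cli", config_file="/path/to/cli-config.yml")
    cli.container  # wrappers like container/control/tree remain available
    # cli.bearer / cli.ape_manager exist only on the side of the compare that keeps those modules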

View file

@@ -9,15 +9,11 @@ class FrostfsCliContainer(CliCommand):
         self,
         rpc_endpoint: str,
         wallet: Optional[str] = None,
-        nns_zone: Optional[str] = None,
-        nns_name: Optional[str] = None,
         address: Optional[str] = None,
         attributes: Optional[dict] = None,
         basic_acl: Optional[str] = None,
         await_mode: bool = False,
         disable_timestamp: bool = False,
-        force: bool = False,
-        trace: bool = False,
         name: Optional[str] = None,
         nonce: Optional[str] = None,
         policy: Optional[str] = None,
@@ -39,8 +35,6 @@ class FrostfsCliContainer(CliCommand):
             basic_acl: Hex encoded basic ACL value or keywords like 'public-read-write',
                 'private', 'eacl-public-read' (default "private").
             disable_timestamp: Disable timestamp container attribute.
-            force: Skip placement validity check.
-            trace: Generate trace ID and print it.
             name: Container name attribute.
             nonce: UUIDv4 nonce value for container.
             policy: QL-encoded or JSON-encoded placement policy or path to file with it.
@@ -51,8 +45,6 @@ class FrostfsCliContainer(CliCommand):
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
             timeout: Timeout for the operation (default 15s).
-            nns_zone: Container nns zone attribute.
-            nns_name: Container nns name attribute.
 
         Returns:
             Command's result.
@@ -73,7 +65,6 @@ class FrostfsCliContainer(CliCommand):
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
         force: bool = False,
-        trace: bool = False,
     ) -> CommandResult:
         """
         Delete an existing container.
@@ -83,7 +74,6 @@ class FrostfsCliContainer(CliCommand):
             address: Address of wallet account.
             await_mode: Block execution until container is removed.
             cid: Container ID.
-            trace: Generate trace ID and print it.
             force: Do not check whether container contains locks and remove immediately.
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             session: Path to a JSON-encoded container session token.
@@ -106,11 +96,9 @@ class FrostfsCliContainer(CliCommand):
         cid: str,
         wallet: Optional[str] = None,
         address: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         await_mode: bool = False,
         to: Optional[str] = None,
         json_mode: bool = False,
-        trace: bool = False,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
@@ -123,14 +111,12 @@ class FrostfsCliContainer(CliCommand):
             await_mode: Block execution until container is removed.
             cid: Container ID.
             json_mode: Print or dump container in JSON format.
-            trace: Generate trace ID and print it.
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             to: Path to dump encoded container.
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
             timeout: Timeout for the operation (default 15s).
-            generate_key: Generate a new private key.
 
         Returns:
             Command's result.
@@ -146,7 +132,6 @@ class FrostfsCliContainer(CliCommand):
         cid: str,
         wallet: Optional[str] = None,
         address: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         await_mode: bool = False,
         to: Optional[str] = None,
         session: Optional[str] = None,
@@ -163,14 +148,11 @@ class FrostfsCliContainer(CliCommand):
             cid: Container ID.
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             to: Path to dump encoded container.
-            json_mode: Print or dump container in JSON format.
-            trace: Generate trace ID and print it.
             session: Path to a JSON-encoded container session token.
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
             timeout: Timeout for the operation (default 15s).
-            generate_key: Generate a new private key.
 
         Returns:
             Command's result.
@@ -184,10 +166,8 @@ class FrostfsCliContainer(CliCommand):
     def list(
         self,
         rpc_endpoint: str,
-        name: Optional[str] = None,
         wallet: Optional[str] = None,
         address: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         owner: Optional[str] = None,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
@@ -199,15 +179,12 @@ class FrostfsCliContainer(CliCommand):
         Args:
             address: Address of wallet account.
-            name: List containers by the attribute name.
             owner: Owner of containers (omit to use owner from private key).
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
-            trace: Generate trace ID and print it.
             timeout: Timeout for the operation (default 15s).
-            generate_key: Generate a new private key.
 
         Returns:
             Command's result.
@@ -221,11 +198,8 @@ class FrostfsCliContainer(CliCommand):
         self,
         rpc_endpoint: str,
         cid: str,
-        bearer: Optional[str] = None,
         wallet: Optional[str] = None,
         address: Optional[str] = None,
-        generate_key: Optional[bool] = None,
-        trace: bool = False,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
@@ -236,14 +210,11 @@ class FrostfsCliContainer(CliCommand):
         Args:
             address: Address of wallet account.
             cid: Container ID.
-            bearer: File with signed JSON or binary encoded bearer token.
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
-            trace: Generate trace ID and print it.
             timeout: Timeout for the operation (default 15s).
-            generate_key: Generate a new private key.
 
         Returns:
             Command's result.
@@ -253,7 +224,6 @@ class FrostfsCliContainer(CliCommand):
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
 
-    # TODO Deprecated method with 0.42
     def set_eacl(
         self,
         rpc_endpoint: str,
@@ -299,7 +269,6 @@ class FrostfsCliContainer(CliCommand):
         address: Optional[str] = None,
         ttl: Optional[int] = None,
         from_file: Optional[str] = None,
-        trace: bool = False,
         short: Optional[bool] = True,
         xhdr: Optional[dict] = None,
         generate_key: Optional[bool] = None,
@@ -317,9 +286,8 @@ class FrostfsCliContainer(CliCommand):
             from_file: string File path with encoded container
             timeout: duration Timeout for the operation (default 15 s)
             short: shorten the output of node information.
-            trace: Generate trace ID and print it.
             xhdr: Dict with request X-Headers.
-            generate_key: Generate a new private key.
+            generate_key: Generate a new private key
 
         Returns:
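
The net effect on FrostfsCliContainer is that create/delete/get/get_eacl/list/list_objects lose the tracing, NNS, bearer and generate_key knobs. A call that previously passed them must shrink accordingly (sketch; cli is the FrostfsCli instance from the earlier sketch and the endpoint/policy values are placeholders):

    result = cli.container.create(
        rpc_endpoint="s01.frostfs.devenv:8080",
        policy="REP 2 IN X CBF 1 SELECT 4 FROM * AS X",
        await_mode=True,
        # nns_zone=..., nns_name=..., force=..., trace=...  # accepted only on the side that keeps them
    )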

View file

@@ -69,7 +69,7 @@ class FrostfsCliControl(CliCommand):
             wallet: Path to the wallet or binary key
             address: Address of wallet account
             endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            objects: List of object addresses to be removed in string format
+            objects: List of object addresses to be removed in string format
             timeout: Timeout for an operation (default 15s)
 
         Returns:
@@ -78,155 +78,4 @@ class FrostfsCliControl(CliCommand):
         return self._execute(
             "control drop-objects",
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
-
-    def add_rule(
-        self,
-        endpoint: str,
-        chain_id: str,
-        target_name: str,
-        target_type: str,
-        rule: Optional[list[str]] = None,
-        path: Optional[str] = None,
-        chain_id_hex: Optional[bool] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Drop objects from the node's local storage
-
-        Args:
-            address: Address of wallet account
-            chain-id: Assign ID to the parsed chain
-            chain-id-hex: Flag to parse chain ID as hex
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            path: Path to encoded chain in JSON or binary format
-            rule: Rule statement
-            target-name: Resource name in APE resource name format
-            target-type: Resource type(container/namespace)
-            timeout: Timeout for an operation (default 15s)
-            wallet: Path to the wallet or binary key
-
-        Returns:
-            Command`s result.
-        """
-        return self._execute(
-            "control add-rule",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def get_rule(
-        self,
-        endpoint: str,
-        chain_id: str,
-        target_name: str,
-        target_type: str,
-        chain_id_hex: Optional[bool] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Drop objects from the node's local storage
-
-        Args:
-            address string Address of wallet account
-            chain-id string Chain id
-            chain-id-hex Flag to parse chain ID as hex
-            endpoint string Remote node control address (as 'multiaddr' or '<host>:<port>')
-            target-name string Resource name in APE resource name format
-            target-type string Resource type(container/namespace)
-            timeout duration Timeout for an operation (default 15s)
-            wallet string Path to the wallet or binary key
-
-        Returns:
-            Command`s result.
-        """
-        return self._execute(
-            "control get-rule",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def list_rules(
-        self,
-        endpoint: str,
-        target_name: str,
-        target_type: str,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Drop objects from the node's local storage
-
-        Args:
-            address: Address of wallet account
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            target-name: Resource name in APE resource name format
-            target-type: Resource type(container/namespace)
-            timeout: Timeout for an operation (default 15s)
-            wallet: Path to the wallet or binary key
-
-        Returns:
-            Command`s result.
-        """
-        return self._execute(
-            "control list-rules",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def list_targets(
-        self,
-        endpoint: str,
-        chain_name: str,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Drop objects from the node's local storage
-
-        Args:
-            address: Address of wallet account
-            chain-name: Chain name(ingress|s3)
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            timeout: Timeout for an operation (default 15s)
-            wallet: Path to the wallet or binary key
-
-        Returns:
-            Command`s result.
-        """
-        return self._execute(
-            "control list-targets",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def remove_rule(
-        self,
-        endpoint: str,
-        chain_id: str,
-        target_name: str,
-        target_type: str,
-        all: Optional[bool] = None,
-        chain_id_hex: Optional[bool] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Drop objects from the node's local storage
-
-        Args:
-            address: Address of wallet account
-            all: Remove all chains
-            chain-id: Assign ID to the parsed chain
-            chain-id-hex: Flag to parse chain ID as hex
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            target-name: Resource name in APE resource name format
-            target-type: Resource type(container/namespace)
-            timeout: Timeout for an operation (default 15s)
-            wallet: Path to the wallet or binary key
-
-        Returns:
-            Command`s result.
-        """
-        return self._execute(
-            "control remove-rule",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )

View file

@@ -13,7 +13,6 @@ class FrostfsCliObject(CliCommand):
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         session: Optional[str] = None,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
@@ -26,7 +25,6 @@ class FrostfsCliObject(CliCommand):
             address: Address of wallet account.
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
-            generate_key: Generate new private key.
             oid: Object ID.
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             session: Filepath to a JSON- or binary-encoded token of the object DELETE session.
@@ -51,7 +49,6 @@ class FrostfsCliObject(CliCommand):
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         file: Optional[str] = None,
         header: Optional[str] = None,
         no_progress: bool = False,
@@ -69,7 +66,6 @@ class FrostfsCliObject(CliCommand):
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
             file: File to write object payload to. Default: stdout.
-            generate_key: Generate new private key.
             header: File to write header to. Default: stdout.
             no_progress: Do not show progress bar.
             oid: Object ID.
@@ -97,7 +93,6 @@ class FrostfsCliObject(CliCommand):
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         range: Optional[str] = None,
         salt: Optional[str] = None,
         ttl: Optional[int] = None,
@@ -113,7 +108,6 @@ class FrostfsCliObject(CliCommand):
             address: Address of wallet account.
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
-            generate_key: Generate new private key.
             oid: Object ID.
             range: Range to take hash from in the form offset1:length1,...
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
@@ -141,7 +135,6 @@ class FrostfsCliObject(CliCommand):
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         file: Optional[str] = None,
         json_mode: bool = False,
         main_only: bool = False,
@@ -160,7 +153,6 @@ class FrostfsCliObject(CliCommand):
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
             file: File to write object payload to. Default: stdout.
-            generate_key: Generate new private key.
             json_mode: Marshal output in JSON.
             main_only: Return only main fields.
             oid: Object ID.
@@ -191,7 +183,6 @@ class FrostfsCliObject(CliCommand):
         expire_at: Optional[int] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         session: Optional[str] = None,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
@@ -204,7 +195,6 @@ class FrostfsCliObject(CliCommand):
             address: Address of wallet account.
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
-            generate_key: Generate new private key.
             oid: Object ID.
             lifetime: Lock lifetime.
             expire_at: Lock expiration epoch.
@@ -232,7 +222,6 @@ class FrostfsCliObject(CliCommand):
         address: Optional[str] = None,
         attributes: Optional[dict] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         copies_number: Optional[int] = None,
         disable_filename: bool = False,
         disable_timestamp: bool = False,
@@ -257,7 +246,6 @@ class FrostfsCliObject(CliCommand):
             disable_timestamp: Do not set well-known timestamp attribute.
             expire_at: Last epoch in the life of the object.
             file: File with object payload.
-            generate_key: Generate new private key.
             no_progress: Do not show progress bar.
             notify: Object notification in the form of *epoch*:*topic*; '-'
                 topic means using default.
@@ -285,7 +273,6 @@ class FrostfsCliObject(CliCommand):
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         file: Optional[str] = None,
         json_mode: bool = False,
         raw: bool = False,
@@ -302,7 +289,6 @@ class FrostfsCliObject(CliCommand):
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
             file: File to write object payload to. Default: stdout.
-            generate_key: Generate new private key.
             json_mode: Marshal output in JSON.
             oid: Object ID.
             range: Range to take data from in the form offset:length.
@@ -329,7 +315,6 @@ class FrostfsCliObject(CliCommand):
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         filters: Optional[list] = None,
         oid: Optional[str] = None,
         phy: bool = False,
@@ -347,7 +332,6 @@ class FrostfsCliObject(CliCommand):
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
             filters: Repeated filter expressions or files with protobuf JSON.
-            generate_key: Generate new private key.
             oid: Object ID.
             phy: Search physically stored objects.
             root: Search for user objects.
@@ -370,15 +354,14 @@ class FrostfsCliObject(CliCommand):
         self,
         rpc_endpoint: str,
         cid: str,
+        oid: Optional[str] = None,
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
         generate_key: Optional[bool] = None,
-        oid: Optional[str] = None,
         trace: bool = False,
         root: bool = False,
         verify_presence_all: bool = False,
-        json: bool = False,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,

View file

@@ -40,7 +40,7 @@ class FrostfsCliShards(CliCommand):
         self,
         endpoint: str,
         mode: str,
-        id: Optional[list[str]] = None,
+        id: Optional[list[str]],
         wallet: Optional[str] = None,
         wallet_password: Optional[str] = None,
         address: Optional[str] = None,
@@ -143,101 +143,3 @@ class FrostfsCliShards(CliCommand):
             **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
         )
-
-    def evacuation_start(
-        self,
-        endpoint: str,
-        id: Optional[str] = None,
-        scope: Optional[str] = None,
-        all: bool = False,
-        no_errors: bool = True,
-        await_mode: bool = False,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-        no_progress: bool = False,
-    ) -> CommandResult:
-        """
-        Objects evacuation from shard to other shards.
-
-        Args:
-            address: Address of wallet account
-            all: Process all shards
-            await: Block execution until evacuation is completed
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            id: List of shard IDs in base58 encoding
-            no_errors: Skip invalid/unreadable objects (default true)
-            no_progress: Print progress if await provided
-            scope: Evacuation scope; possible values: trees, objects, all (default "all")
-            timeout: Timeout for an operation (default 15s)
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "control shards evacuation start",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def evacuation_reset(
-        self,
-        endpoint: str,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """
-        Reset evacuate objects from shard to other shards status.
-
-        Args:
-            address: Address of wallet account
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            timeout: Timeout for an operation (default 15s)
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "control shards evacuation reset",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def evacuation_stop(
-        self,
-        endpoint: str,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """
-        Stop running evacuate process from shard to other shards.
-
-        Args:
-            address: Address of wallet account
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            timeout: Timeout for an operation (default 15s)
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "control shards evacuation stop",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def evacuation_status(
-        self,
-        endpoint: str,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """
-        Get evacuate objects from shard to other shards status.
-
-        Args:
-            address: Address of wallet account
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            timeout: Timeout for an operation (default 15s)
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "control shards evacuation status",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
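
The four deleted evacuation_* helpers wrapped `frostfs-cli control shards evacuation {start,reset,stop,status}`. Code that drove an evacuation through them looked roughly like this (cli is a FrostfsCli instance as sketched earlier; the endpoint value is a placeholder):

    control_endpoint = "s01.frostfs.devenv:8081"
    cli.shards.evacuation_start(endpoint=control_endpoint, all=True, await_mode=True)
    print(cli.shards.evacuation_status(endpoint=control_endpoint).stdout)
    # on the side of the compare that removes these helpers, callers must
    # invoke 'control shards evacuation *' through some other wrapper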

View file

@@ -27,27 +27,3 @@ class FrostfsCliTree(CliCommand):
             "tree healthcheck",
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
-
-    def list(
-        self,
-        cid: str,
-        rpc_endpoint: Optional[str] = None,
-        wallet: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Get Tree List
-
-        Args:
-            cid: Container ID.
-            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
-            wallet: WIF (NEP-2) string or path to the wallet or binary key.
-            timeout: duration Timeout for the operation (default 15 s)
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "tree list",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )

View file

@@ -54,11 +54,3 @@ class FrostfsCliUtil(CliCommand):
             "util sign session-token",
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
-
-    def convert_eacl(self, from_file: str, to_file: str, json: Optional[bool] = False, ape: Optional[bool] = False):
-        """Convert representation of extended ACL table."""
-        return self._execute(
-            "util convert eacl",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )

View file

@@ -15,8 +15,6 @@ class NetmapParser:
         "epoch_duration": r"Epoch duration: (?P<epoch_duration>\d+)",
         "inner_ring_candidate_fee": r"Inner Ring candidate fee: (?P<inner_ring_candidate_fee>\d+)",
         "maximum_object_size": r"Maximum object size: (?P<maximum_object_size>\d+)",
-        "maximum_count_of_data_shards": r"Maximum count of data shards: (?P<maximum_count_of_data_shards>\d+)",
-        "maximum_count_of_parity_shards": r"Maximum count of parity shards: (?P<maximum_count_of_parity_shards>\d+)",
        "withdrawal_fee": r"Withdrawal fee: (?P<withdrawal_fee>\d+)",
         "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?P<homomorphic_hashing_disabled>true|false)",
         "maintenance_mode_allowed": r"Maintenance mode allowed: (?P<maintenance_mode_allowed>true|false)",

View file

@@ -26,7 +26,7 @@ class S3CredentialsProvider(ABC):
         self.cluster = cluster
 
     @abstractmethod
-    def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None, **kwargs) -> S3Credentials:
+    def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials:
         raise NotImplementedError("Directly called abstract class?")
@@ -35,7 +35,7 @@ class GrpcCredentialsProvider(ABC):
         self.cluster = cluster
 
     @abstractmethod
-    def provide(self, user: User, cluster_node: ClusterNode, **kwargs) -> WalletInfo:
+    def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo:
         raise NotImplementedError("Directly called abstract class?")

View file

@@ -1,45 +0,0 @@
-import logging
-import os
-from importlib.metadata import entry_points
-
-import pytest
-import yaml
-
-from frostfs_testlib import reporter
-from frostfs_testlib.hosting.hosting import Hosting
-from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE
-from frostfs_testlib.storage import get_service_registry
-
-
-@pytest.fixture(scope="session")
-def configure_testlib():
-    reporter.get_reporter().register_handler(reporter.AllureHandler())
-    reporter.get_reporter().register_handler(reporter.StepsLogger())
-    logging.getLogger("paramiko").setLevel(logging.INFO)
-
-    # Register Services for cluster
-    registry = get_service_registry()
-    services = entry_points(group="frostfs.testlib.services")
-    for svc in services:
-        registry.register_service(svc.name, svc.load())
-
-
-@pytest.fixture(scope="session")
-def temp_directory(configure_testlib):
-    with reporter.step("Prepare tmp directory"):
-        full_path = ASSETS_DIR
-        if not os.path.exists(full_path):
-            os.mkdir(full_path)
-
-    return full_path
-
-
-@pytest.fixture(scope="session")
-def hosting(configure_testlib) -> Hosting:
-    with open(HOSTING_CONFIG_FILE, "r") as file:
-        hosting_config = yaml.full_load(file)
-
-    hosting_instance = Hosting()
-    hosting_instance.configure(hosting_config)
-
-    return hosting_instance
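
Because both fixtures.py and the pytest11 entry point are dropped in this compare, downstream test projects lose the session fixtures configure_testlib, temp_directory and hosting. A conftest.py sketch that restores just the hosting fixture, lifted from the deleted code (the reporter/service-registry setup from configure_testlib is omitted):

    import pytest
    import yaml

    from frostfs_testlib.hosting.hosting import Hosting
    from frostfs_testlib.resources.common import HOSTING_CONFIG_FILE


    @pytest.fixture(scope="session")
    def hosting() -> Hosting:
        # Same logic as the deleted fixture: load YAML, feed it to Hosting.
        with open(HOSTING_CONFIG_FILE, "r") as file:
            hosting_config = yaml.full_load(file)

        hosting_instance = Hosting()
        hosting_instance.configure(hosting_config)
        return hosting_instance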

View file

@@ -47,14 +47,6 @@ class BasicHealthcheck(Healthcheck):
         self._perform(cluster_node, checks)
 
-    @wait_for_success(900, 30, title="Wait for tree healthcheck on {cluster_node}")
-    def tree_healthcheck(self, cluster_node: ClusterNode) -> str | None:
-        checks = {
-            self._tree_healthcheck: {},
-        }
-
-        self._perform(cluster_node, checks)
-
     @wait_for_success(120, 5, title="Wait for service healthcheck on {cluster_node}")
     def services_healthcheck(self, cluster_node: ClusterNode):
         svcs_to_check = cluster_node.services

View file

@@ -19,7 +19,3 @@ class Healthcheck(ABC):
     @abstractmethod
     def services_healthcheck(self, cluster_node: ClusterNode):
         """Perform service status check on target cluster node"""
-
-    @abstractmethod
-    def tree_healthcheck(self, cluster_node: ClusterNode):
-        """Perform tree healthcheck on target cluster node"""

View file

@@ -60,7 +60,6 @@ class HostConfig:
     """
 
     plugin_name: str
-    hostname: str
     healthcheck_plugin_name: str
     address: str
     s3_creds_plugin_name: str = field(default="authmate")

View file

@@ -185,12 +185,6 @@ class DockerHost(Host):
     def is_file_exist(self, file_path: str) -> None:
         raise NotImplementedError("Not implemented for docker")
 
-    def wipefs_storage_node_data(self, service_name: str) -> None:
-        raise NotImplementedError("Not implemented for docker")
-
-    def finish_wipefs(self, service_name: str) -> None:
-        raise NotImplementedError("Not implemented for docker")
-
     def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None:
         volume_path = self.get_data_directory(service_name)
@@ -246,7 +240,6 @@ class DockerHost(Host):
         until: Optional[datetime] = None,
         unit: Optional[str] = None,
         exclude_filter: Optional[str] = None,
-        priority: Optional[str] = None,
     ) -> str:
         client = self._get_docker_client()
         filtered_logs = ""

View file

@@ -178,21 +178,6 @@ class Host(ABC):
             cache_only: To delete cache only.
         """
 
-    @abstractmethod
-    def wipefs_storage_node_data(self, service_name: str) -> None:
-        """Erases all data of the storage node with specified name.
-
-        Args:
-            service_name: Name of storage node service.
-        """
-
-    def finish_wipefs(self, service_name: str) -> None:
-        """Erases all data of the storage node with specified name.
-
-        Args:
-            service_name: Name of storage node service.
-        """
-
     @abstractmethod
     def delete_fstree(self, service_name: str) -> None:
         """
@@ -312,7 +297,6 @@ class Host(ABC):
         until: Optional[datetime] = None,
         unit: Optional[str] = None,
         exclude_filter: Optional[str] = None,
-        priority: Optional[str] = None,
     ) -> str:
         """Get logs from host filtered by regex.
 
@@ -321,8 +305,6 @@ class Host(ABC):
             since: If set, limits the time from which logs should be collected. Must be in UTC.
             until: If set, limits the time until which logs should be collected. Must be in UTC.
             unit: required unit.
-            priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher.
-                For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0.
 
         Returns:
             Found entries as str if any found.

View file

@@ -86,7 +86,7 @@ class SummarizedStats:
             target.latencies.by_node[node_key] = operation.latency
             target.throughput += operation.throughput
             target.errors.threshold = load_params.error_threshold
-            target.total_bytes += operation.total_bytes
+            target.total_bytes = operation.total_bytes
             if operation.failed_iterations:
                 target.errors.by_node[node_key] = operation.failed_iterations
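The += to = change is behavioural, not cosmetic: with +=, per-node byte counts accumulate into a cluster-wide total, whereas plain assignment keeps only the node processed last. A toy illustration with invented numbers:

    per_node_bytes = {"node1": 100, "node2": 250, "node3": 50}

    accumulated = 0
    overwritten = 0
    for node_bytes in per_node_bytes.values():
        accumulated += node_bytes  # sums across nodes: cluster-wide total
        overwritten = node_bytes   # keeps only the last node's value

    assert accumulated == 400
    assert overwritten == 50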

View file

@@ -25,16 +25,6 @@ def convert_time_to_seconds(time: int | str | None) -> int:
     return seconds


-def force_list(input: str | list[str]):
-    if input is None:
-        return None
-
-    if isinstance(input, list):
-        return list(map(str.strip, input))
-
-    return [input.strip()]
-
-
 class LoadType(Enum):
     gRPC = "grpc"
     S3 = "s3"

@@ -129,8 +119,6 @@ class NodesSelectionStrategy(Enum):
     ALL_EXCEPT_UNDER_TEST = "ALL_EXCEPT_UNDER_TEST"
     # Select ONE random node except under test (useful for failover).
     RANDOM_SINGLE_EXCEPT_UNDER_TEST = "RANDOM_SINGLE_EXCEPT_UNDER_TEST"
-    # Select node under test
-    NODE_UNDER_TEST = "NODE_UNDER_TEST"


 class EndpointSelectionStrategy(Enum):

@@ -152,29 +140,8 @@ class K6ProcessAllocationStrategy(Enum):
     PER_ENDPOINT = "PER_ENDPOINT"


-class MetaConfig:
-    def _get_field_formatter(self, field_name: str) -> Callable | None:
-        data_fields = fields(self)
-        formatters = [
-            field.metadata["formatter"]
-            for field in data_fields
-            if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None
-        ]
-        if formatters:
-            return formatters[0]
-
-        return None
-
-    def __setattr__(self, field_name, value):
-        formatter = self._get_field_formatter(field_name)
-        if formatter:
-            value = formatter(value)
-
-        super().__setattr__(field_name, value)
-
-
 @dataclass
-class Preset(MetaConfig):
+class Preset:
     # ------ COMMON ------
     # Amount of objects which should be created
     objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None, False)

@@ -189,15 +156,13 @@ class Preset(MetaConfig):
     # Amount of containers which should be created
     containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None, False)
     # Container placement policy for containers for gRPC
-    container_placement_policy: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "policy", None, False, formatter=force_list)
-    # Number of retries for creation of container
-    container_creation_retry: Optional[int] = metadata_field(grpc_preset_scenarios, "retry", None, False)
+    container_placement_policy: Optional[str] = metadata_field(grpc_preset_scenarios, "policy", None, False)

     # ------ S3 ------
     # Amount of buckets which should be created
     buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None, False)
     # S3 region (AKA placement policy for S3 buckets)
-    s3_location: Optional[list[str]] = metadata_field(s3_preset_scenarios, "location", None, False, formatter=force_list)
+    s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None, False)

     # Delay between containers creation and object upload for preset
     object_upload_delay: Optional[int] = metadata_field(all_load_scenarios, "sleep", None, False)

@@ -210,7 +175,7 @@
 @dataclass
-class PrometheusParams(MetaConfig):
+class PrometheusParams:
     # Prometheus server URL
     server_url: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False)
     # Prometheus trend stats

@@ -220,7 +185,7 @@ class PrometheusParams(MetaConfig):
 @dataclass
-class LoadParams(MetaConfig):
+class LoadParams:
     # ------- CONTROL PARAMS -------
     # Load type can be gRPC, HTTP, S3.
     load_type: LoadType

@@ -268,8 +233,6 @@
     )
     # Percentage of filling of all data disks on all nodes
     fill_percent: Optional[float] = None
-    # if specified, max payload size in GB of the storage engine. If the storage engine is already full, no new objects will be saved.
-    max_total_size_gb: Optional[float] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "MAX_TOTAL_SIZE_GB")
     # if set, the payload is generated on the fly and is not read into memory fully.
     streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False)
     # Output format

@@ -445,11 +408,6 @@
         # For preset calls, bool values are passed with just --<argument_name> if the value is True
         return f"--{meta_field.metadata['preset_argument']}" if meta_field.value else ""

-        if isinstance(meta_field.value, list):
-            return (
-                " ".join(f"--{meta_field.metadata['preset_argument']} '{value}'" for value in meta_field.value) if meta_field.value else ""
-            )
-
         return f"--{meta_field.metadata['preset_argument']} '{meta_field.value}'"

     @staticmethod

@@ -469,6 +427,25 @@
         return fields_with_data or []

+    def _get_field_formatter(self, field_name: str) -> Callable | None:
+        data_fields = fields(self)
+        formatters = [
+            field.metadata["formatter"]
+            for field in data_fields
+            if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None
+        ]
+        if formatters:
+            return formatters[0]
+
+        return None
+
+    def __setattr__(self, field_name, value):
+        formatter = self._get_field_formatter(field_name)
+        if formatter:
+            value = formatter(value)
+
+        super().__setattr__(field_name, value)
+
     def __str__(self) -> str:
         load_type_str = self.scenario.value if self.scenario else self.load_type.value
         # TODO: migrate load_params defaults to testlib
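The MetaConfig machinery deleted above (and re-homed onto LoadParams) intercepts attribute assignment and routes the value through an optional per-field formatter; combined with force_list, that is what let container_placement_policy accept either one policy string or a list of them. A condensed, self-contained sketch of the mechanism, with DemoPreset and its field invented for illustration:

    from dataclasses import dataclass, field, fields
    from typing import Callable, Optional

    def force_list(value):
        if value is None:
            return None
        if isinstance(value, list):
            return list(map(str.strip, value))
        return [value.strip()]

    class MetaConfig:
        def _get_field_formatter(self, field_name: str) -> Callable | None:
            for f in fields(self):
                if f.name == field_name and f.metadata.get("formatter") is not None:
                    return f.metadata["formatter"]
            return None

        def __setattr__(self, field_name, value):
            formatter = self._get_field_formatter(field_name)
            if formatter:
                value = formatter(value)
            super().__setattr__(field_name, value)

    @dataclass
    class DemoPreset(MetaConfig):
        policy: Optional[list[str]] = field(default=None, metadata={"formatter": force_list})

    preset = DemoPreset(policy="REP 2 IN X")
    assert preset.policy == ["REP 2 IN X"]  # a bare string is normalized to a list

    # The deleted list branch in the preset-argument builder then expands each entry:
    assert " ".join(f"--policy '{v}'" for v in preset.policy) == "--policy 'REP 2 IN X'"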

View file

@@ -57,8 +57,6 @@ class LoadVerifier:
         invalid_objects = verify_metrics.read.failed_iterations
         total_left_objects = load_metrics.write.success_iterations - delete_success

-        if invalid_objects > 0:
-            issues.append(f"There were {invalid_objects} verification fails (hash mismatch).")
         # Due to interruptions we may see total verified objects to be less than written on writers count
         if abs(total_left_objects - verified_objects) > writers:
             issues.append(
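For clarity on what the removed guard did: failed read iterations from the verify phase were reported as hash mismatches before the object-count comparison ran. A minimal rendering with an invented count:

    issues: list[str] = []
    invalid_objects = 3  # hypothetical verify_metrics.read.failed_iterations

    if invalid_objects > 0:
        issues.append(f"There were {invalid_objects} verification fails (hash mismatch).")

    assert issues == ["There were 3 verification fails (hash mismatch)."]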

View file

@@ -46,10 +46,3 @@ with open(DEFAULT_WALLET_CONFIG, "w") as file:
 MAX_REQUEST_ATTEMPTS = 5
 RETRY_MODE = "standard"
 CREDENTIALS_CREATE_TIMEOUT = "1m"
-
-HOSTING_CONFIG_FILE = os.getenv(
-    "HOSTING_CONFIG_FILE", os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..", ".devenv.hosting.yaml"))
-)
-
-MORE_LOG = os.getenv("MORE_LOG", "1")

View file

@@ -23,10 +23,6 @@ INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow"
 INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier"
 INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier"

-S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs"
-S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema."
+S3_MALFORMED_XML_REQUEST = (
+    "The XML you provided was not well-formed or did not validate against our published schema."
+)

-RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied"
-RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied"
-NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound"
-NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound"

View file

@@ -26,7 +26,6 @@ BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY = os.getenv(
 )
 BACKGROUND_LOAD_S3_LOCATION = os.getenv("BACKGROUND_LOAD_S3_LOCATION", "node-off")
 PRESET_CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "40")
-PRESET_CONTAINER_CREATION_RETRY_COUNT = os.getenv("CONTAINER_CREATION_RETRY_COUNT", "20")
 # TODO: At lease one object is required due to bug in xk6 (buckets with no objects produce millions exceptions in read)
 PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "1")
 K6_DIRECTORY = os.getenv("K6_DIRECTORY", "/etc/k6")

View file

@@ -1,9 +0,0 @@
-ALL_USERS_GROUP_URI = "http://acs.amazonaws.com/groups/global/AllUsers"
-ALL_USERS_GROUP_WRITE_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "WRITE"}
-ALL_USERS_GROUP_READ_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "READ"}
-CANONICAL_USER_FULL_CONTROL_GRANT = {"Grantee": {"Type": "CanonicalUser"}, "Permission": "FULL_CONTROL"}
-
-# https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl
-PRIVATE_GRANTS = []
-PUBLIC_READ_GRANTS = [ALL_USERS_GROUP_READ_GRANT]
-PUBLIC_READ_WRITE_GRANTS = [ALL_USERS_GROUP_WRITE_GRANT, ALL_USERS_GROUP_READ_GRANT]
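The deleted module mirrored the AWS canned ACLs its comment links to. A short sketch of how such grant lists would feed an S3 put_bucket_acl call; the bucket name and owner ID are placeholders, and the call itself is left commented since it needs a configured client:

    ALL_USERS_GROUP_URI = "http://acs.amazonaws.com/groups/global/AllUsers"
    PUBLIC_READ_GRANTS = [
        {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "READ"},
    ]

    access_control_policy = {
        "Grants": PUBLIC_READ_GRANTS,
        "Owner": {"ID": "canonical-owner-id"},  # placeholder owner
    }
    # boto3_client.put_bucket_acl(Bucket="demo-bucket", AccessControlPolicy=access_control_policy)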

View file

@@ -1,6 +1,7 @@
 import json
 import logging
 import os
+import uuid
 from datetime import datetime
 from time import sleep
 from typing import Literal, Optional, Union
@@ -10,11 +11,9 @@ from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, R
 from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
 from frostfs_testlib.shell import CommandOptions
 from frostfs_testlib.shell.local_shell import LocalShell
-from frostfs_testlib.utils import string_utils

 # TODO: Refactor this code to use shell instead of _cmd_run
 from frostfs_testlib.utils.cli_utils import _configure_aws_cli
-from frostfs_testlib.utils.file_utils import TestFile

 logger = logging.getLogger("NeoLogger")
 command_options = CommandOptions(timeout=480)
@@ -68,7 +67,7 @@ class AwsCliClient(S3ClientWrapper):
         location_constraint: Optional[str] = None,
     ) -> str:
         if bucket is None:
-            bucket = string_utils.unique_name("bucket-")
+            bucket = str(uuid.uuid4())

         if object_lock_enabled_for_bucket is None:
             object_lock = ""

@@ -91,6 +90,7 @@ class AwsCliClient(S3ClientWrapper):
         if location_constraint:
             cmd += f" --create-bucket-configuration LocationConstraint={location_constraint}"
         self.local_shell.exec(cmd)
+        sleep(S3_SYNC_WAIT_TIME)

         return bucket
@@ -105,6 +105,7 @@ class AwsCliClient(S3ClientWrapper):
     def delete_bucket(self, bucket: str) -> None:
         cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
         self.local_shell.exec(cmd, command_options)
+        sleep(S3_SYNC_WAIT_TIME)

     @reporter.step("Head bucket S3")
     def head_bucket(self, bucket: str) -> None:
@@ -152,7 +153,8 @@ class AwsCliClient(S3ClientWrapper):
     @reporter.step("Get bucket acl")
     def get_bucket_acl(self, bucket: str) -> list:
         cmd = (
-            f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
+            f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} "
+            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
         )
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)

@@ -170,7 +172,10 @@ class AwsCliClient(S3ClientWrapper):
     @reporter.step("List objects S3")
     def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
-        cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
+        cmd = (
+            f"aws {self.common_flags} s3api list-objects --bucket {bucket} "
+            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
+        )
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
@@ -229,7 +234,7 @@ class AwsCliClient(S3ClientWrapper):
         if bucket is None:
             bucket = source_bucket
         if key is None:
-            key = string_utils.unique_name("copy-object-")
+            key = os.path.join(os.getcwd(), str(uuid.uuid4()))
         copy_source = f"{source_bucket}/{source_key}"

         cmd = (
@@ -314,18 +319,18 @@ class AwsCliClient(S3ClientWrapper):
         version_id: Optional[str] = None,
         object_range: Optional[tuple[int, int]] = None,
         full_output: bool = False,
-    ) -> dict | TestFile:
-        test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-")))
+    ) -> Union[dict, str]:
+        file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
         version = f" --version-id {version_id}" if version_id else ""
         cmd = (
             f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} "
-            f"{version} {test_file} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
+            f"{version} {file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
         )
         if object_range:
             cmd += f" --range bytes={object_range[0]}-{object_range[1]}"
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
-        return response if full_output else test_file
+        return response if full_output else file_path

     @reporter.step("Get object ACL")
     def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
@@ -395,6 +400,7 @@ class AwsCliClient(S3ClientWrapper):
         )
         output = self.local_shell.exec(cmd, command_options).stdout
         response = self._to_json(output)
+        sleep(S3_SYNC_WAIT_TIME)
         return response

     @reporter.step("Delete object S3")

@@ -405,6 +411,7 @@ class AwsCliClient(S3ClientWrapper):
             f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
         )
         output = self.local_shell.exec(cmd, command_options).stdout
+        sleep(S3_SYNC_WAIT_TIME)
         return self._to_json(output)

     @reporter.step("Delete object versions S3")

@@ -431,6 +438,7 @@ class AwsCliClient(S3ClientWrapper):
             f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
         )
         output = self.local_shell.exec(cmd, command_options).stdout
+        sleep(S3_SYNC_WAIT_TIME)
         return self._to_json(output)

     @reporter.step("Delete object versions S3 without delete markers")
@@ -481,16 +489,6 @@ class AwsCliClient(S3ClientWrapper):
         response = self._to_json(output)
         return response.get("Policy")

-    @reporter.step("Delete bucket policy")
-    def delete_bucket_policy(self, bucket: str) -> dict:
-        cmd = (
-            f"aws {self.common_flags} s3api delete-bucket-policy --bucket {bucket} "
-            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
-        )
-        output = self.local_shell.exec(cmd).stdout
-        response = self._to_json(output)
-        return response
-
     @reporter.step("Put bucket policy")
     def put_bucket_policy(self, bucket: str, policy: dict) -> None:
         # Leaving it as is was in test repo. Double dumps to escape resulting string
@@ -575,7 +573,7 @@ class AwsCliClient(S3ClientWrapper):
         self.local_shell.exec(cmd)

     @reporter.step("Put object tagging")
-    def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None:
+    def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = '') -> None:
         tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
         tagging = {"TagSet": tags}
         version = f" --version-id {version_id}" if version_id else ""
@@ -614,7 +612,8 @@ class AwsCliClient(S3ClientWrapper):
         metadata: Optional[dict] = None,
     ) -> dict:
         cmd = (
-            f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
+            f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} "
+            f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
         )
         if metadata:
             cmd += " --metadata"
@@ -730,10 +729,7 @@ class AwsCliClient(S3ClientWrapper):
             f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} "
             f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
         )
-        output = self.local_shell.exec(cmd).stdout
-
-        response = self._to_json(output)
-        return response
+        self.local_shell.exec(cmd)

     @reporter.step("Put object lock configuration")
     def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict:
@@ -770,7 +766,9 @@ class AwsCliClient(S3ClientWrapper):
     @reporter.step("Adds the specified user to the specified group")
     def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam add-user-to-group --user-name {user_name} --group-name {group_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam add-user-to-group --user-name {user_name} --group-name {group_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -778,9 +776,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Attaches the specified managed policy to the specified IAM group")
     def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict:
-        cmd = f"aws {self.common_flags} iam attach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam attach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -789,9 +790,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Attaches the specified managed policy to the specified user")
     def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
-        cmd = f"aws {self.common_flags} iam attach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam attach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -800,9 +804,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Creates a new AWS secret access key and access key ID for the specified user")
     def iam_create_access_key(self, user_name: Optional[str] = None) -> dict:
-        cmd = f"aws {self.common_flags} iam create-access-key --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam create-access-key --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         if user_name:
@@ -817,9 +824,12 @@ class AwsCliClient(S3ClientWrapper):
         return access_key_id, secret_access_key

     @reporter.step("Creates a new group")
     def iam_create_group(self, group_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam create-group --group-name {group_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam create-group --group-name {group_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -830,6 +840,7 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Creates a new managed policy for your AWS account")
     def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
         cmd = (
@@ -847,9 +858,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Creates a new IAM user for your AWS account")
     def iam_create_user(self, user_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam create-user --user-name {user_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam create-user --user-name {user_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -860,9 +874,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Deletes the access key pair associated with the specified IAM user")
     def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam delete-access-key --access-key-id {access_key_id} --user-name {user_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam delete-access-key --access-key-id {access_key_id} --user-name {user_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
@@ -871,9 +888,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Deletes the specified IAM group")
     def iam_delete_group(self, group_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam delete-group --group-name {group_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam delete-group --group-name {group_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -881,9 +901,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group")
     def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam delete-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam delete-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -891,9 +914,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Deletes the specified managed policy")
     def iam_delete_policy(self, policy_arn: str) -> dict:
-        cmd = f"aws {self.common_flags} iam delete-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam delete-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -901,19 +927,26 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Deletes the specified IAM user")
     def iam_delete_user(self, user_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam delete-user --user-name {user_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam delete-user --user-name {user_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response

     @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user")
     def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam delete-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam delete-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -921,9 +954,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Removes the specified managed policy from the specified IAM group")
     def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
-        cmd = f"aws {self.common_flags} iam detach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam detach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -932,9 +968,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Removes the specified managed policy from the specified user")
     def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
-        cmd = f"aws {self.common_flags} iam detach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam detach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -943,9 +982,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Returns a list of IAM users that are in the specified IAM group")
     def iam_get_group(self, group_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam get-group --group-name {group_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam get-group --group-name {group_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -956,9 +998,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group")
     def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam get-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam get-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -966,22 +1011,28 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Retrieves information about the specified managed policy")
     def iam_get_policy(self, policy_arn: str) -> dict:
-        cmd = f"aws {self.common_flags} iam get-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam get-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)

         assert response.get("Policy"), f"Expected Policy in response:\n{response}"
-        assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}"
+        assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}"

         return response

     @reporter.step("Retrieves information about the specified version of the specified managed policy")
     def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
-        cmd = f"aws {self.common_flags} iam get-policy-version --policy-arn {policy_arn} --version-id {version_id} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam get-policy-version --policy-arn {policy_arn} --version-id {version_id} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -992,9 +1043,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Retrieves information about the specified IAM user")
     def iam_get_user(self, user_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam get-user --user-name {user_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam get-user --user-name {user_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -1005,9 +1059,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user")
     def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam get-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam get-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -1017,9 +1074,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Returns information about the access key IDs associated with the specified IAM user")
     def iam_list_access_keys(self, user_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam list-access-keys --user-name {user_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam list-access-keys --user-name {user_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -1027,9 +1087,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Lists all managed policies that are attached to the specified IAM group")
     def iam_list_attached_group_policies(self, group_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam list-attached-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam list-attached-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -1039,9 +1102,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Lists all managed policies that are attached to the specified IAM user")
     def iam_list_attached_user_policies(self, user_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam list-attached-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam list-attached-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -1051,9 +1117,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to")
     def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
-        cmd = f"aws {self.common_flags} iam list-entities-for-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam list-entities-for-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -1064,9 +1133,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group")
     def iam_list_group_policies(self, group_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam list-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam list-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -1076,9 +1148,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Lists the IAM groups")
     def iam_list_groups(self) -> dict:
-        cmd = f"aws {self.common_flags} iam list-groups --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam list-groups --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -1088,9 +1163,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Lists the IAM groups that the specified IAM user belongs to")
     def iam_list_groups_for_user(self, user_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam list-groups-for-user --user-name {user_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam list-groups-for-user --user-name {user_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -1100,21 +1178,27 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Lists all the managed policies that are available in your AWS account")
     def iam_list_policies(self) -> dict:
-        cmd = f"aws {self.common_flags} iam list-policies --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam list-policies --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)

-        assert "Policies" in response.keys(), f"Expected Policies in response:\n{response}"
+        assert 'Policies' in response.keys(), f"Expected Policies in response:\n{response}"

         return response

     @reporter.step("Lists information about the versions of the specified managed policy")
     def iam_list_policy_versions(self, policy_arn: str) -> dict:
-        cmd = f"aws {self.common_flags} iam list-policy-versions --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam list-policy-versions --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -1124,9 +1208,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Lists the names of the inline policies embedded in the specified IAM user")
     def iam_list_user_policies(self, user_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam list-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam list-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -1136,9 +1223,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Lists the IAM users")
     def iam_list_users(self) -> dict:
-        cmd = f"aws {self.common_flags} iam list-users --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam list-users --endpoint {self.iam_endpoint}"
+        )
         if self.profile:
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
@@ -1148,11 +1238,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group")
     def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
         cmd = (
             f"aws {self.common_flags} iam put-group-policy --endpoint {self.iam_endpoint}"
-            f" --group-name {group_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'"
+            f" --group-name {group_name} --policy-name {policy_name} --policy-document \'{json.dumps(policy_document)}\'"
         )
         if self.profile:
             cmd += f" --profile {self.profile}"
@@ -1162,11 +1253,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user")
     def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
         cmd = (
             f"aws {self.common_flags} iam put-user-policy --endpoint {self.iam_endpoint}"
-            f" --user-name {user_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'"
+            f" --user-name {user_name} --policy-name {policy_name} --policy-document \'{json.dumps(policy_document)}\'"
         )
         if self.profile:
             cmd += f" --profile {self.profile}"
@@ -1177,6 +1269,7 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Removes the specified user from the specified group")
     def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
         cmd = (
@@ -1190,9 +1283,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Updates the name and/or the path of the specified IAM group")
     def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
-        cmd = f"aws {self.common_flags} iam update-group --group-name {group_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam update-group --group-name {group_name} --endpoint {self.iam_endpoint}"
+        )
         if new_name:
             cmd += f" --new-group-name {new_name}"
         if new_path:
@@ -1205,9 +1301,12 @@ class AwsCliClient(S3ClientWrapper):
         return response

     @reporter.step("Updates the name and/or the path of the specified IAM user")
     def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
-        cmd = f"aws {self.common_flags} iam update-user --user-name {user_name} --endpoint {self.iam_endpoint}"
+        cmd = (
+            f"aws {self.common_flags} iam update-user --user-name {user_name} --endpoint {self.iam_endpoint}"
+        )
         if new_name:
             cmd += f" --new-user-name {new_name}"
         if new_path:
@@ -1220,39 +1319,4 @@ class AwsCliClient(S3ClientWrapper):
         return response

-    @reporter.step("Adds one or more tags to an IAM user")
-    def iam_tag_user(self, user_name: str, tags: list) -> dict:
-        tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
-        cmd = (
-            f"aws {self.common_flags} iam tag-user --user-name {user_name} --tags '{json.dumps(tags_json)}' --endpoint {self.iam_endpoint}"
-        )
-        if self.profile:
-            cmd += f" --profile {self.profile}"
-        output = self.local_shell.exec(cmd).stdout
-        response = self._to_json(output)
-        return response
-
-    @reporter.step("List tags of IAM user")
-    def iam_list_user_tags(self, user_name: str) -> dict:
-        cmd = f"aws {self.common_flags} iam list-user-tags --user-name {user_name} --endpoint {self.iam_endpoint}"
-        if self.profile:
-            cmd += f" --profile {self.profile}"
-        output = self.local_shell.exec(cmd).stdout
-        response = self._to_json(output)
-        return response
-
-    @reporter.step("Removes the specified tags from the user")
-    def iam_untag_user(self, user_name: str, tag_keys: list) -> dict:
-        tag_keys_joined = " ".join(tag_keys)
-        cmd = f"aws {self.common_flags} iam untag-user --user-name {user_name} --tag-keys {tag_keys_joined} --endpoint {self.iam_endpoint}"
-        if self.profile:
-            cmd += f" --profile {self.profile}"
-        output = self.local_shell.exec(cmd).stdout
-        response = self._to_json(output)
-        return response
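A recurring theme in this file is the swap between string_utils.unique_name and bare uuid4 identifiers for buckets, keys and download paths. A side-by-side sketch of the two schemes; the unique_name body below is an assumption about its behaviour, not the library's actual implementation:

    import uuid
    from datetime import datetime

    def unique_name(prefix: str) -> str:
        # Assumption: a readable prefix plus a sortable timestamp suffix.
        return f"{prefix}{datetime.utcnow():%Y%m%d%H%M%S%f}"

    readable_bucket = unique_name("bucket-")  # e.g. bucket-20240101120000123456
    opaque_bucket = str(uuid.uuid4())         # e.g. 8f14e45f-ceea-41e4-916d-...

The prefixed form makes test artifacts attributable at a glance, which is the main design argument for it over a bare UUID.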

View file

@@ -1,6 +1,7 @@
 import json
 import logging
 import os
+import uuid
 from datetime import datetime
 from functools import wraps
 from time import sleep
@@ -15,11 +16,10 @@ from mypy_boto3_s3 import S3Client
 from frostfs_testlib import reporter
 from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME
 from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
-from frostfs_testlib.utils import string_utils
+from frostfs_testlib.utils.cli_utils import log_command_execution

 # TODO: Refactor this code to use shell instead of _cmd_run
-from frostfs_testlib.utils.cli_utils import log_command_execution
-from frostfs_testlib.utils.file_utils import TestFile
+from frostfs_testlib.utils.cli_utils import _configure_aws_cli

 logger = logging.getLogger("NeoLogger")
@@ -34,15 +34,7 @@ def report_error(func):
         try:
             return func(*a, **kw)
         except ClientError as err:
-            url = None
-            params = {"args": a, "kwargs": kw}
-
-            if isinstance(a[0], Boto3ClientWrapper):
-                client: Boto3ClientWrapper = a[0]
-                url = client.s3gate_endpoint
-                params = {"args": a[1:], "kwargs": kw}
-
-            log_command_execution(url, f"Failed {err.operation_name}", err.response, params)
+            log_command_execution("Result", str(err))
             raise

     return deco
@@ -88,6 +80,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
             verify=False,
         )

     @reporter.step("Set endpoint IAM to {iam_endpoint}")
     def set_iam_endpoint(self, iam_endpoint: str):
         self.boto3_iam_client = self.session.client(
@@ -95,10 +88,10 @@ class Boto3ClientWrapper(S3ClientWrapper):
             aws_access_key_id=self.access_key_id,
             aws_secret_access_key=self.secret_access_key,
             endpoint_url=iam_endpoint,
-            verify=False,
-        )
+            verify=False,)

-    def _to_s3_param(self, param: str) -> str:
+    def _to_s3_param(self, param: str):
         replacement_map = {
             "Acl": "ACL",
             "Cors": "CORS",
@@ -109,11 +102,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
             result = result.replace(find, replace)
         return result

-    def _convert_to_s3_params(self, scope: dict, exclude: Optional[list[str]] = None) -> dict:
-        if not exclude:
-            exclude = ["self"]
-        return {self._to_s3_param(param): value for param, value in scope if param not in exclude and value is not None}
-
     # BUCKET METHODS #
     @reporter.step("Create bucket S3")
     @report_error
@@ -128,7 +116,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
         location_constraint: Optional[str] = None,
     ) -> str:
         if bucket is None:
-            bucket = string_utils.unique_name("bucket-")
+            bucket = str(uuid.uuid4())

         params = {"Bucket": bucket}
         if object_lock_enabled_for_bucket is not None:
@ -146,7 +134,8 @@ class Boto3ClientWrapper(S3ClientWrapper):
params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}})
s3_bucket = self.boto3_client.create_bucket(**params) s3_bucket = self.boto3_client.create_bucket(**params)
log_command_execution(self.s3gate_endpoint, f"Created S3 bucket {bucket}", s3_bucket, params) log_command_execution(f"Created S3 bucket {bucket}", s3_bucket)
sleep(S3_SYNC_WAIT_TIME * 10)
return bucket return bucket
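
Both branches end up in boto3's `create_bucket`; only the random-name helper and the trailing sync wait differ. A hedged usage sketch against an S3-compatible gateway; the endpoint and credentials are placeholders, not values from this diff:

import uuid

import boto3

client = boto3.client(
    "s3",
    endpoint_url="https://s3.frostfs.devenv:8080",  # placeholder dev endpoint
    aws_access_key_id="<ACCESS_KEY>",
    aws_secret_access_key="<SECRET_KEY>",
    verify=False,
)

bucket = str(uuid.uuid4())
client.create_bucket(Bucket=bucket, ObjectLockEnabledForBucket=True)
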
@reporter.step("List buckets S3") @reporter.step("List buckets S3")
@ -155,7 +144,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
found_buckets = [] found_buckets = []
response = self.boto3_client.list_buckets() response = self.boto3_client.list_buckets()
log_command_execution(self.s3gate_endpoint, "S3 List buckets result", response) log_command_execution("S3 List buckets result", response)
for bucket in response["Buckets"]: for bucket in response["Buckets"]:
found_buckets.append(bucket["Name"]) found_buckets.append(bucket["Name"])
@@ -166,27 +155,29 @@ class Boto3ClientWrapper(S3ClientWrapper):
    @report_error
    def delete_bucket(self, bucket: str) -> None:
        response = self.boto3_client.delete_bucket(Bucket=bucket)
-        log_command_execution(self.s3gate_endpoint, "S3 Delete bucket result", response, {"Bucket": bucket})
+        log_command_execution("S3 Delete bucket result", response)
+        sleep(S3_SYNC_WAIT_TIME * 10)

    @reporter.step("Head bucket S3")
    @report_error
    def head_bucket(self, bucket: str) -> None:
        response = self.boto3_client.head_bucket(Bucket=bucket)
-        log_command_execution(self.s3gate_endpoint, "S3 Head bucket result", response, {"Bucket": bucket})
+        log_command_execution("S3 Head bucket result", response)

    @reporter.step("Put bucket versioning status")
    @report_error
    def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None:
-        params = {"Bucket": bucket, "VersioningConfiguration": {"Status": status.value}}
-        response = self.boto3_client.put_bucket_versioning(**params)
-        log_command_execution(self.s3gate_endpoint, "S3 Set bucket versioning to", response, params)
+        response = self.boto3_client.put_bucket_versioning(
+            Bucket=bucket, VersioningConfiguration={"Status": status.value}
+        )
+        log_command_execution("S3 Set bucket versioning to", response)

    @reporter.step("Get bucket versioning status")
    @report_error
    def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]:
        response = self.boto3_client.get_bucket_versioning(Bucket=bucket)
        status = response.get("Status")
-        log_command_execution(self.s3gate_endpoint, "S3 Got bucket versioning status", response, {"Bucket": bucket})
+        log_command_execution("S3 Got bucket versioning status", response)
        return status
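
Versioning is a plain round-trip through boto3 on both sides; only the logging differs. A minimal hedged sketch (placeholder endpoint; credentials assumed to come from the environment):

import boto3

client = boto3.client("s3", endpoint_url="https://s3.frostfs.devenv:8080", verify=False)  # placeholder

client.put_bucket_versioning(
    Bucket="demo-bucket",
    VersioningConfiguration={"Status": "Enabled"},
)
status = client.get_bucket_versioning(Bucket="demo-bucket").get("Status")
assert status == "Enabled"
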
@reporter.step("Put bucket tagging") @reporter.step("Put bucket tagging")
@ -194,29 +185,28 @@ class Boto3ClientWrapper(S3ClientWrapper):
def put_bucket_tagging(self, bucket: str, tags: list) -> None: def put_bucket_tagging(self, bucket: str, tags: list) -> None:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
tagging = {"TagSet": tags} tagging = {"TagSet": tags}
params = self._convert_to_s3_params(locals().items(), exclude=["self", "tags"]) response = self.boto3_client.put_bucket_tagging(Bucket=bucket, Tagging=tagging)
response = self.boto3_client.put_bucket_tagging(**params) log_command_execution("S3 Put bucket tagging", response)
log_command_execution(self.s3gate_endpoint, "S3 Put bucket tagging", response, params)
@reporter.step("Get bucket tagging") @reporter.step("Get bucket tagging")
@report_error @report_error
def get_bucket_tagging(self, bucket: str) -> list: def get_bucket_tagging(self, bucket: str) -> list:
response = self.boto3_client.get_bucket_tagging(Bucket=bucket) response = self.boto3_client.get_bucket_tagging(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 Get bucket tagging", response, {"Bucket": bucket}) log_command_execution("S3 Get bucket tagging", response)
return response.get("TagSet") return response.get("TagSet")
@reporter.step("Get bucket acl") @reporter.step("Get bucket acl")
@report_error @report_error
def get_bucket_acl(self, bucket: str) -> list: def get_bucket_acl(self, bucket: str) -> list:
response = self.boto3_client.get_bucket_acl(Bucket=bucket) response = self.boto3_client.get_bucket_acl(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 Get bucket acl", response, {"Bucket": bucket}) log_command_execution("S3 Get bucket acl", response)
return response.get("Grants") return response.get("Grants")
@reporter.step("Delete bucket tagging") @reporter.step("Delete bucket tagging")
@report_error @report_error
def delete_bucket_tagging(self, bucket: str) -> None: def delete_bucket_tagging(self, bucket: str) -> None:
response = self.boto3_client.delete_bucket_tagging(Bucket=bucket) response = self.boto3_client.delete_bucket_tagging(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 Delete bucket tagging", response, {"Bucket": bucket}) log_command_execution("S3 Delete bucket tagging", response)
@reporter.step("Put bucket ACL") @reporter.step("Put bucket ACL")
@report_error @report_error
@@ -227,74 +217,68 @@ class Boto3ClientWrapper(S3ClientWrapper):
        grant_write: Optional[str] = None,
        grant_read: Optional[str] = None,
    ) -> None:
-        params = self._convert_to_s3_params(locals().items())
+        params = {
+            self._to_s3_param(param): value
+            for param, value in locals().items()
+            if param not in ["self"] and value is not None
+        }
        response = self.boto3_client.put_bucket_acl(**params)
-        log_command_execution(self.s3gate_endpoint, "S3 ACL bucket result", response, params)
+        log_command_execution("S3 ACL bucket result", response)
@reporter.step("Put object lock configuration") @reporter.step("Put object lock configuration")
@report_error @report_error
def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict:
params = {"Bucket": bucket, "ObjectLockConfiguration": configuration} response = self.boto3_client.put_object_lock_configuration(Bucket=bucket, ObjectLockConfiguration=configuration)
response = self.boto3_client.put_object_lock_configuration(**params) log_command_execution("S3 put_object_lock_configuration result", response)
log_command_execution(self.s3gate_endpoint, "S3 put_object_lock_configuration result", response, params)
return response return response
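
For readers unfamiliar with the call: the `configuration` dict is passed straight through to boto3 and follows the standard object-lock shape. A hedged sketch with illustrative values:

import boto3

client = boto3.client("s3", endpoint_url="https://s3.frostfs.devenv:8080", verify=False)  # placeholder

configuration = {
    "ObjectLockEnabled": "Enabled",
    "Rule": {"DefaultRetention": {"Mode": "COMPLIANCE", "Days": 1}},
}
client.put_object_lock_configuration(
    Bucket="demo-bucket", ObjectLockConfiguration=configuration
)
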
@reporter.step("Get object lock configuration") @reporter.step("Get object lock configuration")
@report_error @report_error
def get_object_lock_configuration(self, bucket: str) -> dict: def get_object_lock_configuration(self, bucket: str) -> dict:
response = self.boto3_client.get_object_lock_configuration(Bucket=bucket) response = self.boto3_client.get_object_lock_configuration(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 get_object_lock_configuration result", response, {"Bucket": bucket}) log_command_execution("S3 get_object_lock_configuration result", response)
return response.get("ObjectLockConfiguration") return response.get("ObjectLockConfiguration")
@reporter.step("Get bucket policy") @reporter.step("Get bucket policy")
@report_error @report_error
def get_bucket_policy(self, bucket: str) -> str: def get_bucket_policy(self, bucket: str) -> str:
response = self.boto3_client.get_bucket_policy(Bucket=bucket) response = self.boto3_client.get_bucket_policy(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 get_bucket_policy result", response, {"Bucket": bucket}) log_command_execution("S3 get_bucket_policy result", response)
return response.get("Policy") return response.get("Policy")
@reporter.step("Delete bucket policy")
@report_error
def delete_bucket_policy(self, bucket: str) -> str:
response = self.boto3_client.delete_bucket_policy(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_policy result", response, {"Bucket": bucket})
return response
@reporter.step("Put bucket policy") @reporter.step("Put bucket policy")
@report_error @report_error
def put_bucket_policy(self, bucket: str, policy: dict) -> None: def put_bucket_policy(self, bucket: str, policy: dict) -> None:
params = {"Bucket": bucket, "Policy": json.dumps(policy)} response = self.boto3_client.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy))
response = self.boto3_client.put_bucket_policy(**params) log_command_execution("S3 put_bucket_policy result", response)
log_command_execution(self.s3gate_endpoint, "S3 put_bucket_policy result", response, params)
return response return response
@reporter.step("Get bucket cors") @reporter.step("Get bucket cors")
@report_error @report_error
def get_bucket_cors(self, bucket: str) -> dict: def get_bucket_cors(self, bucket: str) -> dict:
response = self.boto3_client.get_bucket_cors(Bucket=bucket) response = self.boto3_client.get_bucket_cors(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 get_bucket_cors result", response, {"Bucket": bucket}) log_command_execution("S3 get_bucket_cors result", response)
return response.get("CORSRules") return response.get("CORSRules")
@reporter.step("Get bucket location") @reporter.step("Get bucket location")
@report_error @report_error
def get_bucket_location(self, bucket: str) -> str: def get_bucket_location(self, bucket: str) -> str:
response = self.boto3_client.get_bucket_location(Bucket=bucket) response = self.boto3_client.get_bucket_location(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 get_bucket_location result", response, {"Bucket": bucket}) log_command_execution("S3 get_bucket_location result", response)
return response.get("LocationConstraint") return response.get("LocationConstraint")
@reporter.step("Put bucket cors") @reporter.step("Put bucket cors")
@report_error @report_error
def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None:
params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.put_bucket_cors(Bucket=bucket, CORSConfiguration=cors_configuration)
response = self.boto3_client.put_bucket_cors(**params) log_command_execution("S3 put_bucket_cors result", response)
log_command_execution(self.s3gate_endpoint, "S3 put_bucket_cors result", response, params)
return response return response
@reporter.step("Delete bucket cors") @reporter.step("Delete bucket cors")
@report_error @report_error
def delete_bucket_cors(self, bucket: str) -> None: def delete_bucket_cors(self, bucket: str) -> None:
response = self.boto3_client.delete_bucket_cors(Bucket=bucket) response = self.boto3_client.delete_bucket_cors(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_cors result", response, {"Bucket": bucket}) log_command_execution("S3 delete_bucket_cors result", response)
# END OF BUCKET METHODS # # END OF BUCKET METHODS #
    # OBJECT METHODS #
@@ -302,9 +286,8 @@ class Boto3ClientWrapper(S3ClientWrapper):
    @reporter.step("List objects S3 v2")
    @report_error
    def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
-        params = self._convert_to_s3_params(locals().items())
        response = self.boto3_client.list_objects_v2(Bucket=bucket)
-        log_command_execution(self.s3gate_endpoint, "S3 v2 List objects result", response, params)
+        log_command_execution("S3 v2 List objects result", response)
        obj_list = [obj["Key"] for obj in response.get("Contents", [])]
        logger.info(f"Found s3 objects: {obj_list}")
@@ -314,9 +297,8 @@ class Boto3ClientWrapper(S3ClientWrapper):
    @reporter.step("List objects S3")
    @report_error
    def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
-        params = self._convert_to_s3_params(locals().items())
        response = self.boto3_client.list_objects(Bucket=bucket)
-        log_command_execution(self.s3gate_endpoint, "S3 List objects result", response, params)
+        log_command_execution("S3 List objects result", response)
        obj_list = [obj["Key"] for obj in response.get("Contents", [])]
        logger.info(f"Found s3 objects: {obj_list}")
@@ -326,17 +308,15 @@ class Boto3ClientWrapper(S3ClientWrapper):
    @reporter.step("List objects versions S3")
    @report_error
    def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict:
-        params = self._convert_to_s3_params(locals().items())
        response = self.boto3_client.list_object_versions(Bucket=bucket)
-        log_command_execution(self.s3gate_endpoint, "S3 List objects versions result", response, params)
+        log_command_execution("S3 List objects versions result", response)
        return response if full_output else response.get("Versions", [])

    @reporter.step("List objects delete markers S3")
    @report_error
    def list_delete_markers(self, bucket: str, full_output: bool = False) -> list:
-        params = self._convert_to_s3_params(locals().items())
        response = self.boto3_client.list_object_versions(Bucket=bucket)
-        log_command_execution(self.s3gate_endpoint, "S3 List objects delete markers result", response, params)
+        log_command_execution("S3 List objects delete markers result", response)
        return response if full_output else response.get("DeleteMarkers", [])
@reporter.step("Put object S3") @reporter.step("Put object S3")
@ -361,36 +341,49 @@ class Boto3ClientWrapper(S3ClientWrapper):
with open(filepath, "rb") as put_file: with open(filepath, "rb") as put_file:
body = put_file.read() body = put_file.read()
params = self._convert_to_s3_params(locals().items(), exclude=["self", "filepath", "put_file", "body"]) params = {
response = self.boto3_client.put_object(Body=body, **params) self._to_s3_param(param): value
log_command_execution(self.s3gate_endpoint, "S3 Put object result", response, params) for param, value in locals().items()
if param not in ["self", "filepath", "put_file"] and value is not None
}
response = self.boto3_client.put_object(**params)
log_command_execution("S3 Put object result", response)
return response.get("VersionId") return response.get("VersionId")
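
Stripped of the parameter plumbing, both versions of `put_object` reduce to reading the file into memory and passing the bytes as `Body`. A hedged minimal sketch (placeholder endpoint and file name):

import boto3

client = boto3.client("s3", endpoint_url="https://s3.frostfs.devenv:8080", verify=False)  # placeholder

with open("payload.bin", "rb") as f:
    body = f.read()

response = client.put_object(Bucket="demo-bucket", Key="payload.bin", Body=body)
version_id = response.get("VersionId")  # only present on versioned buckets
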
@reporter.step("Head object S3") @reporter.step("Head object S3")
@report_error @report_error
def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
params = self._convert_to_s3_params(locals().items()) params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
response = self.boto3_client.head_object(**params) response = self.boto3_client.head_object(**params)
log_command_execution(self.s3gate_endpoint, "S3 Head object result", response, params) log_command_execution("S3 Head object result", response)
return response return response
@reporter.step("Delete object S3") @reporter.step("Delete object S3")
@report_error @report_error
def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
params = self._convert_to_s3_params(locals().items()) params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
response = self.boto3_client.delete_object(**params) response = self.boto3_client.delete_object(**params)
log_command_execution(self.s3gate_endpoint, "S3 Delete object result", response, params) log_command_execution("S3 Delete object result", response)
sleep(S3_SYNC_WAIT_TIME * 10)
return response return response
@reporter.step("Delete objects S3") @reporter.step("Delete objects S3")
@report_error @report_error
def delete_objects(self, bucket: str, keys: list[str]) -> dict: def delete_objects(self, bucket: str, keys: list[str]) -> dict:
params = {"Bucket": bucket, "Delete": _make_objs_dict(keys)} response = self.boto3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(keys))
response = self.boto3_client.delete_objects(**params) log_command_execution("S3 Delete objects result", response)
log_command_execution(self.s3gate_endpoint, "S3 Delete objects result", response, params)
assert ( assert (
"Errors" not in response "Errors" not in response
), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}'
sleep(S3_SYNC_WAIT_TIME * 10)
return response return response
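
`_make_objs_dict` is defined in interfaces.py and its body is not part of this diff; judging by how boto3's `delete_objects` is called here, it presumably builds the standard `Delete` payload. An illustrative sketch under that assumption:

import boto3

client = boto3.client("s3", endpoint_url="https://s3.frostfs.devenv:8080", verify=False)  # placeholder

keys = ["a.txt", "b.txt"]
delete_payload = {"Objects": [{"Key": key} for key in keys]}  # assumed _make_objs_dict output
response = client.delete_objects(Bucket="demo-bucket", Delete=delete_payload)
assert "Errors" not in response
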
@reporter.step("Delete object versions S3") @reporter.step("Delete object versions S3")
@ -406,9 +399,8 @@ class Boto3ClientWrapper(S3ClientWrapper):
for object_version in object_versions for object_version in object_versions
] ]
} }
params = {"Bucket": bucket, "Delete": delete_list} response = self.boto3_client.delete_objects(Bucket=bucket, Delete=delete_list)
response = self.boto3_client.delete_objects(**params) log_command_execution("S3 Delete objects result", response)
log_command_execution(self.s3gate_endpoint, "S3 Delete objects result", response, params)
return response return response
@reporter.step("Delete object versions S3 without delete markers") @reporter.step("Delete object versions S3 without delete markers")
@ -416,9 +408,10 @@ class Boto3ClientWrapper(S3ClientWrapper):
def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None:
# Delete objects without creating delete markers # Delete objects without creating delete markers
for object_version in object_versions: for object_version in object_versions:
params = {"Bucket": bucket, "Key": object_version["Key"], "VersionId": object_version["VersionId"]} response = self.boto3_client.delete_object(
response = self.boto3_client.delete_object(**params) Bucket=bucket, Key=object_version["Key"], VersionId=object_version["VersionId"]
log_command_execution(self.s3gate_endpoint, "S3 Delete object result", response, params) )
log_command_execution("S3 Delete object result", response)
@reporter.step("Put object ACL") @reporter.step("Put object ACL")
@report_error @report_error
@ -430,17 +423,19 @@ class Boto3ClientWrapper(S3ClientWrapper):
grant_write: Optional[str] = None, grant_write: Optional[str] = None,
grant_read: Optional[str] = None, grant_read: Optional[str] = None,
) -> list: ) -> list:
params = self._convert_to_s3_params(locals().items()) # pytest.skip("Method put_object_acl is not supported by boto3 client")
response = self.boto3_client.put_object_acl(**params) raise NotImplementedError("Unsupported for boto3 client")
log_command_execution(self.s3gate_endpoint, "S3 put object ACL", response, params)
return response.get("Grants")
@reporter.step("Get object ACL") @reporter.step("Get object ACL")
@report_error @report_error
def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
params = self._convert_to_s3_params(locals().items()) params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
response = self.boto3_client.get_object_acl(**params) response = self.boto3_client.get_object_acl(**params)
log_command_execution(self.s3gate_endpoint, "S3 ACL objects result", response, params) log_command_execution("S3 ACL objects result", response)
return response.get("Grants") return response.get("Grants")
@reporter.step("Copy object S3") @reporter.step("Copy object S3")
@ -460,12 +455,16 @@ class Boto3ClientWrapper(S3ClientWrapper):
if bucket is None: if bucket is None:
bucket = source_bucket bucket = source_bucket
if key is None: if key is None:
key = string_utils.unique_name("copy-object-") key = os.path.join(os.getcwd(), str(uuid.uuid4()))
copy_source = f"{source_bucket}/{source_key}" copy_source = f"{source_bucket}/{source_key}"
params = self._convert_to_s3_params(locals().items(), exclude=["self", "source_bucket", "source_key"]) params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self", "source_bucket", "source_key"] and value is not None
}
response = self.boto3_client.copy_object(**params) response = self.boto3_client.copy_object(**params)
log_command_execution(self.s3gate_endpoint, "S3 Copy objects result", response, params) log_command_execution("S3 Copy objects result", response)
return key return key
@reporter.step("Get object S3") @reporter.step("Get object S3")
@ -477,35 +476,32 @@ class Boto3ClientWrapper(S3ClientWrapper):
version_id: Optional[str] = None, version_id: Optional[str] = None,
object_range: Optional[tuple[int, int]] = None, object_range: Optional[tuple[int, int]] = None,
full_output: bool = False, full_output: bool = False,
) -> dict | TestFile: ) -> Union[dict, str]:
filename = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
range_str = None range_str = None
if object_range: if object_range:
range_str = f"bytes={object_range[0]}-{object_range[1]}" range_str = f"bytes={object_range[0]}-{object_range[1]}"
params = self._convert_to_s3_params( params = {
{**locals(), **{"Range": range_str}}.items(), self._to_s3_param(param): value
exclude=["self", "object_range", "full_output", "range_str"], for param, value in {**locals(), **{"Range": range_str}}.items()
) if param not in ["self", "object_range", "full_output", "range_str", "filename"] and value is not None
}
response = self.boto3_client.get_object(**params) response = self.boto3_client.get_object(**params)
log_command_execution(self.s3gate_endpoint, "S3 Get objects result", response, params) log_command_execution("S3 Get objects result", response)
if full_output: with open(f"{filename}", "wb") as get_file:
return response
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-")))
with open(test_file, "wb") as file:
chunk = response["Body"].read(1024) chunk = response["Body"].read(1024)
while chunk: while chunk:
file.write(chunk) get_file.write(chunk)
chunk = response["Body"].read(1024) chunk = response["Body"].read(1024)
return test_file return response if full_output else filename
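
Both sides share the same download mechanics: an optional `Range` header and a 1 KiB chunked copy from the streaming body to disk. A hedged sketch (placeholder endpoint, key, and output path):

import boto3

client = boto3.client("s3", endpoint_url="https://s3.frostfs.devenv:8080", verify=False)  # placeholder

response = client.get_object(Bucket="demo-bucket", Key="payload.bin", Range="bytes=0-1023")
with open("downloaded.part", "wb") as out:
    chunk = response["Body"].read(1024)
    while chunk:
        out.write(chunk)
        chunk = response["Body"].read(1024)
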
@reporter.step("Create multipart upload S3") @reporter.step("Create multipart upload S3")
@report_error @report_error
def create_multipart_upload(self, bucket: str, key: str) -> str: def create_multipart_upload(self, bucket: str, key: str) -> str:
params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.create_multipart_upload(Bucket=bucket, Key=key)
response = self.boto3_client.create_multipart_upload(**params) log_command_execution("S3 Created multipart upload", response)
log_command_execution(self.s3gate_endpoint, "S3 Created multipart upload", response, params)
assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" assert response.get("UploadId"), f"Expected UploadId in response:\n{response}"
return response["UploadId"] return response["UploadId"]
@@ -514,16 +510,15 @@ class Boto3ClientWrapper(S3ClientWrapper):
    @report_error
    def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]:
        response = self.boto3_client.list_multipart_uploads(Bucket=bucket)
-        log_command_execution(self.s3gate_endpoint, "S3 List multipart upload", response, {"Bucket": bucket})
+        log_command_execution("S3 List multipart upload", response)
        return response.get("Uploads")

    @reporter.step("Abort multipart upload S3")
    @report_error
    def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None:
-        params = self._convert_to_s3_params(locals().items())
-        response = self.boto3_client.abort_multipart_upload(**params)
-        log_command_execution(self.s3gate_endpoint, "S3 Abort multipart upload", response, params)
+        response = self.boto3_client.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id)
+        log_command_execution("S3 Abort multipart upload", response)

    @reporter.step("Upload part S3")
    @report_error
@@ -531,10 +526,14 @@ class Boto3ClientWrapper(S3ClientWrapper):
        with open(filepath, "rb") as put_file:
            body = put_file.read()

-        params = self._convert_to_s3_params(locals().items(), exclude=["self", "put_file", "part_num", "filepath", "body"])
-        params["PartNumber"] = part_num
-        response = self.boto3_client.upload_part(Body=body, **params)
-        log_command_execution(self.s3gate_endpoint, "S3 Upload part", response, params)
+        response = self.boto3_client.upload_part(
+            UploadId=upload_id,
+            Bucket=bucket,
+            Key=key,
+            PartNumber=part_num,
+            Body=body,
+        )
+        log_command_execution("S3 Upload part", response)
        assert response.get("ETag"), f"Expected ETag in response:\n{response}"
        return response["ETag"]

@@ -542,10 +541,14 @@ class Boto3ClientWrapper(S3ClientWrapper):
    @reporter.step("Upload copy part S3")
    @report_error
    def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str:
-        params = self._convert_to_s3_params(locals().items(), exclude=["self", "put_file", "part_num", "filepath"])
-        params["PartNumber"] = part_num
-        response = self.boto3_client.upload_part_copy(**params)
-        log_command_execution(self.s3gate_endpoint, "S3 Upload copy part", response, params)
+        response = self.boto3_client.upload_part_copy(
+            UploadId=upload_id,
+            Bucket=bucket,
+            Key=key,
+            PartNumber=part_num,
+            CopySource=copy_source,
+        )
+        log_command_execution("S3 Upload copy part", response)
        assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}"
        return response["CopyPartResult"]["ETag"]
@@ -553,9 +556,8 @@ class Boto3ClientWrapper(S3ClientWrapper):
    @reporter.step("List parts S3")
    @report_error
    def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]:
-        params = self._convert_to_s3_params(locals().items())
-        response = self.boto3_client.list_parts(**params)
-        log_command_execution(self.s3gate_endpoint, "S3 List part", response, params)
+        response = self.boto3_client.list_parts(UploadId=upload_id, Bucket=bucket, Key=key)
+        log_command_execution("S3 List part", response)
        assert response.get("Parts"), f"Expected Parts in response:\n{response}"
        return response["Parts"]

@@ -564,12 +566,10 @@ class Boto3ClientWrapper(S3ClientWrapper):
    @report_error
    def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
        parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]
-        params = self._convert_to_s3_params(locals().items(), exclude=["self", "parts"])
-        params["MultipartUpload"] = {"Parts": parts}
-        response = self.boto3_client.complete_multipart_upload(**params)
-        log_command_execution(self.s3gate_endpoint, "S3 Complete multipart upload", response, params)
-        return response
+        response = self.boto3_client.complete_multipart_upload(
+            Bucket=bucket, Key=key, UploadId=upload_id, MultipartUpload={"Parts": parts}
+        )
+        log_command_execution("S3 Complete multipart upload", response)
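
Taken together, the multipart methods decompose the standard three-step boto3 flow. A hedged end-to-end sketch (placeholder endpoint; note that every part except the last must be at least 5 MiB):

import boto3

client = boto3.client("s3", endpoint_url="https://s3.frostfs.devenv:8080", verify=False)  # placeholder

upload_id = client.create_multipart_upload(Bucket="demo-bucket", Key="big.bin")["UploadId"]
etag = client.upload_part(
    Bucket="demo-bucket",
    Key="big.bin",
    UploadId=upload_id,
    PartNumber=1,
    Body=b"x" * (5 * 1024 * 1024),
)["ETag"]
client.complete_multipart_upload(
    Bucket="demo-bucket",
    Key="big.bin",
    UploadId=upload_id,
    MultipartUpload={"Parts": [{"ETag": etag, "PartNumber": 1}]},
)
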
@reporter.step("Put object retention") @reporter.step("Put object retention")
@report_error @report_error
@ -581,9 +581,13 @@ class Boto3ClientWrapper(S3ClientWrapper):
version_id: Optional[str] = None, version_id: Optional[str] = None,
bypass_governance_retention: Optional[bool] = None, bypass_governance_retention: Optional[bool] = None,
) -> None: ) -> None:
params = self._convert_to_s3_params(locals().items()) params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
response = self.boto3_client.put_object_retention(**params) response = self.boto3_client.put_object_retention(**params)
log_command_execution(self.s3gate_endpoint, "S3 Put object retention ", response, params) log_command_execution("S3 Put object retention ", response)
@reporter.step("Put object legal hold") @reporter.step("Put object legal hold")
@report_error @report_error
@ -595,33 +599,39 @@ class Boto3ClientWrapper(S3ClientWrapper):
version_id: Optional[str] = None, version_id: Optional[str] = None,
) -> None: ) -> None:
legal_hold = {"Status": legal_hold_status} legal_hold = {"Status": legal_hold_status}
params = self._convert_to_s3_params(locals().items(), exclude=["self", "legal_hold_status"]) params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self", "legal_hold_status"] and value is not None
}
response = self.boto3_client.put_object_legal_hold(**params) response = self.boto3_client.put_object_legal_hold(**params)
log_command_execution(self.s3gate_endpoint, "S3 Put object legal hold ", response, params) log_command_execution("S3 Put object legal hold ", response)
@reporter.step("Put object tagging") @reporter.step("Put object tagging")
@report_error @report_error
def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = '') -> None:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
tagging = {"TagSet": tags} tagging = {"TagSet": tags}
params = self._convert_to_s3_params(locals().items(), exclude=["self", "tags"]) response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging, VersionId=version_id)
response = self.boto3_client.put_object_tagging(**params) log_command_execution("S3 Put object tagging", response)
log_command_execution(self.s3gate_endpoint, "S3 Put object tagging", response, params)
@reporter.step("Get object tagging") @reporter.step("Get object tagging")
@report_error @report_error
def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
params = self._convert_to_s3_params(locals().items()) params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
response = self.boto3_client.get_object_tagging(**params) response = self.boto3_client.get_object_tagging(**params)
log_command_execution(self.s3gate_endpoint, "S3 Get object tagging", response, params) log_command_execution("S3 Get object tagging", response)
return response.get("TagSet") return response.get("TagSet")
@reporter.step("Delete object tagging") @reporter.step("Delete object tagging")
@report_error @report_error
def delete_object_tagging(self, bucket: str, key: str) -> None: def delete_object_tagging(self, bucket: str, key: str) -> None:
params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.delete_object_tagging(Bucket=bucket, Key=key)
response = self.boto3_client.delete_object_tagging(**params) log_command_execution("S3 Delete object tagging", response)
log_command_execution(self.s3gate_endpoint, "S3 Delete object tagging", response, params)
@reporter.step("Get object attributes") @reporter.step("Get object attributes")
@report_error @report_error
@ -662,6 +672,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
# END OBJECT METHODS # # END OBJECT METHODS #
# IAM METHODS # # IAM METHODS #
# Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.) # Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.)
@@ -670,18 +681,21 @@ class Boto3ClientWrapper(S3ClientWrapper):
        response = self.boto3_iam_client.add_user_to_group(UserName=user_name, GroupName=group_name)
        return response

    @reporter.step("Attaches the specified managed policy to the specified IAM group")
    def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict:
        response = self.boto3_iam_client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn)
        sleep(S3_SYNC_WAIT_TIME * 10)
        return response

    @reporter.step("Attaches the specified managed policy to the specified user")
    def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
        response = self.boto3_iam_client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn)
        sleep(S3_SYNC_WAIT_TIME * 10)
        return response

    @reporter.step("Creates a new AWS secret access key and access key ID for the specified user")
    def iam_create_access_key(self, user_name: str) -> dict:
        response = self.boto3_iam_client.create_access_key(UserName=user_name)
@@ -693,6 +707,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
        return access_key_id, secret_access_key

    @reporter.step("Creates a new group")
    def iam_create_group(self, group_name: str) -> dict:
        response = self.boto3_iam_client.create_group(GroupName=group_name)
@@ -701,6 +716,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
        return response

    @reporter.step("Creates a new managed policy for your AWS account")
    def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
        response = self.boto3_iam_client.create_policy(PolicyName=policy_name, PolicyDocument=json.dumps(policy_document))
@@ -709,6 +725,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
        return response

    @reporter.step("Creates a new IAM user for your AWS account")
    def iam_create_user(self, user_name: str) -> dict:
        response = self.boto3_iam_client.create_user(UserName=user_name)
@@ -717,48 +734,57 @@ class Boto3ClientWrapper(S3ClientWrapper):
        return response

    @reporter.step("Deletes the access key pair associated with the specified IAM user")
    def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
        response = self.boto3_iam_client.delete_access_key(AccessKeyId=access_key_id, UserName=user_name)
        return response

    @reporter.step("Deletes the specified IAM group")
    def iam_delete_group(self, group_name: str) -> dict:
        response = self.boto3_iam_client.delete_group(GroupName=group_name)
        return response

    @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group")
    def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
        response = self.boto3_iam_client.delete_group_policy(GroupName=group_name, PolicyName=policy_name)
        return response

    @reporter.step("Deletes the specified managed policy")
    def iam_delete_policy(self, policy_arn: str) -> dict:
        response = self.boto3_iam_client.delete_policy(PolicyArn=policy_arn)
        return response

    @reporter.step("Deletes the specified IAM user")
    def iam_delete_user(self, user_name: str) -> dict:
        response = self.boto3_iam_client.delete_user(UserName=user_name)
        return response

    @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user")
    def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
        response = self.boto3_iam_client.delete_user_policy(UserName=user_name, PolicyName=policy_name)
        return response

    @reporter.step("Removes the specified managed policy from the specified IAM group")
    def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
        response = self.boto3_iam_client.detach_group_policy(GroupName=group_name, PolicyArn=policy_arn)
        sleep(S3_SYNC_WAIT_TIME * 10)
        return response

    @reporter.step("Removes the specified managed policy from the specified user")
    def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
        response = self.boto3_iam_client.detach_user_policy(UserName=user_name, PolicyArn=policy_arn)
        sleep(S3_SYNC_WAIT_TIME * 10)
        return response
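
The recurring `sleep(S3_SYNC_WAIT_TIME * 10)` after attach/detach/put-policy calls is an eventual-consistency wait: the gateway may not expose a freshly changed policy immediately. A hedged sketch of the pattern; the constant's actual value is not shown in this diff, and the ARN is illustrative:

import time

import boto3

S3_SYNC_WAIT_TIME = 5  # seconds; assumed value, the testlib constant is defined elsewhere

iam = boto3.client("iam", endpoint_url="https://s3.frostfs.devenv:8080", verify=False)  # placeholder
iam.attach_user_policy(UserName="alice", PolicyArn="arn:aws:iam::aws:policy/ReadOnlyAccess")
time.sleep(S3_SYNC_WAIT_TIME * 10)  # give the gateway time to apply the policy
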
@reporter.step("Returns a list of IAM users that are in the specified IAM group") @reporter.step("Returns a list of IAM users that are in the specified IAM group")
def iam_get_group(self, group_name: str) -> dict: def iam_get_group(self, group_name: str) -> dict:
response = self.boto3_iam_client.get_group(GroupName=group_name) response = self.boto3_iam_client.get_group(GroupName=group_name)
@ -766,20 +792,23 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response return response
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group")
def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
response = self.boto3_iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name) response = self.boto3_iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name)
return response return response
@reporter.step("Retrieves information about the specified managed policy") @reporter.step("Retrieves information about the specified managed policy")
def iam_get_policy(self, policy_arn: str) -> dict: def iam_get_policy(self, policy_arn: str) -> dict:
response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn) response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn)
assert response.get("Policy"), f"Expected Policy in response:\n{response}" assert response.get("Policy"), f"Expected Policy in response:\n{response}"
assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}"
return response return response
@reporter.step("Retrieves information about the specified version of the specified managed policy") @reporter.step("Retrieves information about the specified version of the specified managed policy")
def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
response = self.boto3_iam_client.get_policy_version(PolicyArn=policy_arn, VersionId=version_id) response = self.boto3_iam_client.get_policy_version(PolicyArn=policy_arn, VersionId=version_id)
@ -788,6 +817,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response return response
@reporter.step("Retrieves information about the specified IAM user") @reporter.step("Retrieves information about the specified IAM user")
def iam_get_user(self, user_name: str) -> dict: def iam_get_user(self, user_name: str) -> dict:
response = self.boto3_iam_client.get_user(UserName=user_name) response = self.boto3_iam_client.get_user(UserName=user_name)
@ -796,6 +826,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response return response
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user")
def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
response = self.boto3_iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name) response = self.boto3_iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name)
@ -803,12 +834,14 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response return response
@reporter.step("Returns information about the access key IDs associated with the specified IAM user") @reporter.step("Returns information about the access key IDs associated with the specified IAM user")
def iam_list_access_keys(self, user_name: str) -> dict: def iam_list_access_keys(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_access_keys(UserName=user_name) response = self.boto3_iam_client.list_access_keys(UserName=user_name)
return response return response
@reporter.step("Lists all managed policies that are attached to the specified IAM group") @reporter.step("Lists all managed policies that are attached to the specified IAM group")
def iam_list_attached_group_policies(self, group_name: str) -> dict: def iam_list_attached_group_policies(self, group_name: str) -> dict:
response = self.boto3_iam_client.list_attached_group_policies(GroupName=group_name) response = self.boto3_iam_client.list_attached_group_policies(GroupName=group_name)
@ -816,6 +849,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response return response
@reporter.step("Lists all managed policies that are attached to the specified IAM user") @reporter.step("Lists all managed policies that are attached to the specified IAM user")
def iam_list_attached_user_policies(self, user_name: str) -> dict: def iam_list_attached_user_policies(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_attached_user_policies(UserName=user_name) response = self.boto3_iam_client.list_attached_user_policies(UserName=user_name)
@ -823,6 +857,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response return response
@reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to")
def iam_list_entities_for_policy(self, policy_arn: str) -> dict: def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
response = self.boto3_iam_client.list_entities_for_policy(PolicyArn=policy_arn) response = self.boto3_iam_client.list_entities_for_policy(PolicyArn=policy_arn)
@ -832,6 +867,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response return response
@reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group")
def iam_list_group_policies(self, group_name: str) -> dict: def iam_list_group_policies(self, group_name: str) -> dict:
response = self.boto3_iam_client.list_group_policies(GroupName=group_name) response = self.boto3_iam_client.list_group_policies(GroupName=group_name)
@ -839,6 +875,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response return response
@reporter.step("Lists the IAM groups") @reporter.step("Lists the IAM groups")
def iam_list_groups(self) -> dict: def iam_list_groups(self) -> dict:
response = self.boto3_iam_client.list_groups() response = self.boto3_iam_client.list_groups()
@ -846,6 +883,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response return response
@reporter.step("Lists the IAM groups that the specified IAM user belongs to") @reporter.step("Lists the IAM groups that the specified IAM user belongs to")
def iam_list_groups_for_user(self, user_name: str) -> dict: def iam_list_groups_for_user(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_groups_for_user(UserName=user_name) response = self.boto3_iam_client.list_groups_for_user(UserName=user_name)
@ -853,6 +891,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response return response
@reporter.step("Lists all the managed policies that are available in your AWS account") @reporter.step("Lists all the managed policies that are available in your AWS account")
def iam_list_policies(self) -> dict: def iam_list_policies(self) -> dict:
response = self.boto3_iam_client.list_policies() response = self.boto3_iam_client.list_policies()
@ -860,6 +899,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response return response
@reporter.step("Lists information about the versions of the specified managed policy") @reporter.step("Lists information about the versions of the specified managed policy")
def iam_list_policy_versions(self, policy_arn: str) -> dict: def iam_list_policy_versions(self, policy_arn: str) -> dict:
response = self.boto3_iam_client.list_policy_versions(PolicyArn=policy_arn) response = self.boto3_iam_client.list_policy_versions(PolicyArn=policy_arn)
@ -867,6 +907,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response return response
@reporter.step("Lists the names of the inline policies embedded in the specified IAM user") @reporter.step("Lists the names of the inline policies embedded in the specified IAM user")
def iam_list_user_policies(self, user_name: str) -> dict: def iam_list_user_policies(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_user_policies(UserName=user_name) response = self.boto3_iam_client.list_user_policies(UserName=user_name)
@ -874,6 +915,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response return response
@reporter.step("Lists the IAM users") @reporter.step("Lists the IAM users")
def iam_list_users(self) -> dict: def iam_list_users(self) -> dict:
response = self.boto3_iam_client.list_users() response = self.boto3_iam_client.list_users()
@ -881,50 +923,35 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response return response
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group")
def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
response = self.boto3_iam_client.put_group_policy( response = self.boto3_iam_client.put_group_policy(GroupName=group_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document))
GroupName=group_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)
)
sleep(S3_SYNC_WAIT_TIME * 10) sleep(S3_SYNC_WAIT_TIME * 10)
return response return response
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user")
def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
response = self.boto3_iam_client.put_user_policy( response = self.boto3_iam_client.put_user_policy(UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document))
UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)
)
sleep(S3_SYNC_WAIT_TIME * 10) sleep(S3_SYNC_WAIT_TIME * 10)
return response return response
@reporter.step("Removes the specified user from the specified group") @reporter.step("Removes the specified user from the specified group")
def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
response = self.boto3_iam_client.remove_user_from_group(GroupName=group_name, UserName=user_name) response = self.boto3_iam_client.remove_user_from_group(GroupName=group_name, UserName=user_name)
return response return response
@reporter.step("Updates the name and/or the path of the specified IAM group") @reporter.step("Updates the name and/or the path of the specified IAM group")
def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict: def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict:
response = self.boto3_iam_client.update_group(GroupName=group_name, NewGroupName=new_name, NewPath="/") response = self.boto3_iam_client.update_group(GroupName=group_name, NewGroupName=new_name, NewPath='/')
return response return response
@reporter.step("Updates the name and/or the path of the specified IAM user") @reporter.step("Updates the name and/or the path of the specified IAM user")
def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict: def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict:
response = self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath="/") response = self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath='/')
return response return response
@reporter.step("Adds one or more tags to an IAM user")
def iam_tag_user(self, user_name: str, tags: list) -> dict:
tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
response = self.boto3_iam_client.tag_user(UserName=user_name, Tags=tags_json)
return response
@reporter.step("List tags of IAM user")
def iam_list_user_tags(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_user_tags(UserName=user_name)
return response
@reporter.step("Removes the specified tags from the user")
def iam_untag_user(self, user_name: str, tag_keys: list) -> dict:
response = self.boto3_iam_client.untag_user(UserName=user_name, TagKeys=tag_keys)
return response

View file

@@ -4,7 +4,6 @@ from typing import Literal, Optional, Union
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum
-from frostfs_testlib.utils.file_utils import TestFile
def _make_objs_dict(key_names):
@@ -153,10 +152,6 @@ class S3ClientWrapper(HumanReadableABC):
    def get_bucket_policy(self, bucket: str) -> str:
        """Returns the policy of a specified bucket."""

-    @abstractmethod
-    def delete_bucket_policy(self, bucket: str) -> str:
-        """Deletes the policy of a specified bucket."""
-
    @abstractmethod
    def put_bucket_policy(self, bucket: str, policy: dict) -> None:
        """Applies S3 bucket policy to an S3 bucket."""
@@ -290,7 +285,7 @@ class S3ClientWrapper(HumanReadableABC):
        version_id: Optional[str] = None,
        object_range: Optional[tuple[int, int]] = None,
        full_output: bool = False,
-    ) -> dict | TestFile:
+    ) -> Union[dict, str]:
        """Retrieves objects from S3."""

    @abstractmethod
@@ -401,164 +396,153 @@ class S3ClientWrapper(HumanReadableABC):
    # END OF OBJECT METHODS #

    # IAM METHODS #

    @abstractmethod
    def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict:
-        """Adds the specified user to the specified group"""
+        '''Adds the specified user to the specified group'''

    @abstractmethod
-    def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict:
-        """Attaches the specified managed policy to the specified IAM group"""
+    def iam_attach_group_policy(self, group: str, policy_arn: str) -> dict:
+        '''Attaches the specified managed policy to the specified IAM group'''

    @abstractmethod
    def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
-        """Attaches the specified managed policy to the specified user"""
+        '''Attaches the specified managed policy to the specified user'''

    @abstractmethod
    def iam_create_access_key(self, user_name: str) -> dict:
-        """Creates a new AWS secret access key and access key ID for the specified user"""
+        '''Creates a new AWS secret access key and access key ID for the specified user'''

    @abstractmethod
    def iam_create_group(self, group_name: str) -> dict:
-        """Creates a new group"""
+        '''Creates a new group'''

    @abstractmethod
    def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
-        """Creates a new managed policy for your AWS account"""
+        '''Creates a new managed policy for your AWS account'''

    @abstractmethod
    def iam_create_user(self, user_name: str) -> dict:
-        """Creates a new IAM user for your AWS account"""
+        '''Creates a new IAM user for your AWS account'''

    @abstractmethod
    def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
-        """Deletes the access key pair associated with the specified IAM user"""
+        '''Deletes the access key pair associated with the specified IAM user'''

    @abstractmethod
    def iam_delete_group(self, group_name: str) -> dict:
-        """Deletes the specified IAM group"""
+        '''Deletes the specified IAM group'''

    @abstractmethod
    def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
-        """Deletes the specified inline policy that is embedded in the specified IAM group"""
+        '''Deletes the specified inline policy that is embedded in the specified IAM group'''

    @abstractmethod
    def iam_delete_policy(self, policy_arn: str) -> dict:
-        """Deletes the specified managed policy"""
+        '''Deletes the specified managed policy'''

    @abstractmethod
    def iam_delete_user(self, user_name: str) -> dict:
-        """Deletes the specified IAM user"""
+        '''Deletes the specified IAM user'''

    @abstractmethod
    def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
-        """Deletes the specified inline policy that is embedded in the specified IAM user"""
+        '''Deletes the specified inline policy that is embedded in the specified IAM user'''

    @abstractmethod
    def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
-        """Removes the specified managed policy from the specified IAM group"""
+        '''Removes the specified managed policy from the specified IAM group'''
@abstractmethod @abstractmethod
def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
"""Removes the specified managed policy from the specified user""" '''Removes the specified managed policy from the specified user'''
@abstractmethod @abstractmethod
def iam_get_group(self, group_name: str) -> dict: def iam_get_group(self, group_name: str) -> dict:
"""Returns a list of IAM users that are in the specified IAM group""" '''Returns a list of IAM users that are in the specified IAM group'''
@abstractmethod @abstractmethod
def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
"""Retrieves the specified inline policy document that is embedded in the specified IAM group""" '''Retrieves the specified inline policy document that is embedded in the specified IAM group'''
@abstractmethod @abstractmethod
def iam_get_policy(self, policy_arn: str) -> dict: def iam_get_policy(self, policy_arn: str) -> dict:
"""Retrieves information about the specified managed policy""" '''Retrieves information about the specified managed policy'''
@abstractmethod @abstractmethod
def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
"""Retrieves information about the specified version of the specified managed policy""" '''Retrieves information about the specified version of the specified managed policy'''
@abstractmethod @abstractmethod
def iam_get_user(self, user_name: str) -> dict: def iam_get_user(self, user_name: str) -> dict:
"""Retrieves information about the specified IAM user""" '''Retrieves information about the specified IAM user'''
@abstractmethod @abstractmethod
def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
"""Retrieves the specified inline policy document that is embedded in the specified IAM user""" '''Retrieves the specified inline policy document that is embedded in the specified IAM user'''
@abstractmethod @abstractmethod
def iam_list_access_keys(self, user_name: str) -> dict: def iam_list_access_keys(self, user_name: str) -> dict:
"""Returns information about the access key IDs associated with the specified IAM user""" '''Returns information about the access key IDs associated with the specified IAM user'''
@abstractmethod @abstractmethod
def iam_list_attached_group_policies(self, group_name: str) -> dict: def iam_list_attached_group_policies(self, group_name: str) -> dict:
"""Lists all managed policies that are attached to the specified IAM group""" '''Lists all managed policies that are attached to the specified IAM group'''
@abstractmethod @abstractmethod
def iam_list_attached_user_policies(self, user_name: str) -> dict: def iam_list_attached_user_policies(self, user_name: str) -> dict:
"""Lists all managed policies that are attached to the specified IAM user""" '''Lists all managed policies that are attached to the specified IAM user'''
@abstractmethod @abstractmethod
def iam_list_entities_for_policy(self, policy_arn: str) -> dict: def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
"""Lists all IAM users, groups, and roles that the specified managed policy is attached to""" '''Lists all IAM users, groups, and roles that the specified managed policy is attached to'''
@abstractmethod @abstractmethod
def iam_list_group_policies(self, group_name: str) -> dict: def iam_list_group_policies(self, group_name: str) -> dict:
"""Lists the names of the inline policies that are embedded in the specified IAM group""" '''Lists the names of the inline policies that are embedded in the specified IAM group'''
@abstractmethod @abstractmethod
def iam_list_groups(self) -> dict: def iam_list_groups(self) -> dict:
"""Lists the IAM groups""" '''Lists the IAM groups'''
@abstractmethod @abstractmethod
def iam_list_groups_for_user(self, user_name: str) -> dict: def iam_list_groups_for_user(self, user_name: str) -> dict:
"""Lists the IAM groups that the specified IAM user belongs to""" '''Lists the IAM groups that the specified IAM user belongs to'''
@abstractmethod @abstractmethod
def iam_list_policies(self) -> dict: def iam_list_policies(self) -> dict:
"""Lists all the managed policies that are available in your AWS account""" '''Lists all the managed policies that are available in your AWS account'''
@abstractmethod @abstractmethod
def iam_list_policy_versions(self, policy_arn: str) -> dict: def iam_list_policy_versions(self, policy_arn: str) -> dict:
"""Lists information about the versions of the specified managed policy""" '''Lists information about the versions of the specified managed policy'''
@abstractmethod @abstractmethod
def iam_list_user_policies(self, user_name: str) -> dict: def iam_list_user_policies(self, user_name: str) -> dict:
"""Lists the names of the inline policies embedded in the specified IAM user""" '''Lists the names of the inline policies embedded in the specified IAM user'''
@abstractmethod @abstractmethod
def iam_list_users(self) -> dict: def iam_list_users(self) -> dict:
"""Lists the IAM users""" '''Lists the IAM users'''
@abstractmethod @abstractmethod
def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
"""Adds or updates an inline policy document that is embedded in the specified IAM group""" '''Adds or updates an inline policy document that is embedded in the specified IAM group'''
@abstractmethod @abstractmethod
def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
"""Adds or updates an inline policy document that is embedded in the specified IAM user""" '''Adds or updates an inline policy document that is embedded in the specified IAM user'''
@abstractmethod @abstractmethod
def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
"""Removes the specified user from the specified group""" '''Removes the specified user from the specified group'''
@abstractmethod @abstractmethod
def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
"""Updates the name and/or the path of the specified IAM group""" '''Updates the name and/or the path of the specified IAM group'''
@abstractmethod @abstractmethod
def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
"""Updates the name and/or the path of the specified IAM user""" '''Updates the name and/or the path of the specified IAM user'''
@abstractmethod
def iam_tag_user(self, user_name: str, tags: list) -> dict:
"""Adds one or more tags to an IAM user"""
@abstractmethod
def iam_list_user_tags(self, user_name: str) -> dict:
"""List tags of IAM user"""
@abstractmethod
def iam_untag_user(self, user_name: str, tag_keys: list) -> dict:
"""Removes the specified tags from the user"""

View file

@ -1,18 +1,15 @@
import logging import logging
import subprocess import subprocess
import tempfile import tempfile
from contextlib import nullcontext
from datetime import datetime from datetime import datetime
from typing import IO, Optional from typing import IO, Optional
import pexpect import pexpect
from frostfs_testlib import reporter from frostfs_testlib import reporter
from frostfs_testlib.resources.common import MORE_LOG
from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell
logger = logging.getLogger("frostfs.testlib.shell") logger = logging.getLogger("frostfs.testlib.shell")
step_context = reporter.step if MORE_LOG == "1" else nullcontext
class LocalShell(Shell): class LocalShell(Shell):
@ -31,10 +28,10 @@ class LocalShell(Shell):
for inspector in [*self.command_inspectors, *extra_inspectors]: for inspector in [*self.command_inspectors, *extra_inspectors]:
command = inspector.inspect(original_command, command) command = inspector.inspect(original_command, command)
with step_context(f"Executing command: {command}"): logger.info(f"Executing command: {command}")
if options.interactive_inputs: if options.interactive_inputs:
return self._exec_interactive(command, options) return self._exec_interactive(command, options)
return self._exec_non_interactive(command, options) return self._exec_non_interactive(command, options)
def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult:
start_time = datetime.utcnow() start_time = datetime.utcnow()
@ -63,7 +60,9 @@ class LocalShell(Shell):
if options.check and result.return_code != 0: if options.check and result.return_code != 0:
raise RuntimeError( raise RuntimeError(
f"Command: {command}\nreturn code: {result.return_code}\n" f"Output: {result.stdout}\n" f"Stderr: {result.stderr}\n" f"Command: {command}\nreturn code: {result.return_code}\n"
f"Output: {result.stdout}\n"
f"Stderr: {result.stderr}\n"
) )
return result return result
@ -94,7 +93,9 @@ class LocalShell(Shell):
stderr="", stderr="",
return_code=exc.returncode, return_code=exc.returncode,
) )
raise RuntimeError(f"Command: {command}\nError with retcode: {exc.returncode}\n Output: {exc.output}") from exc raise RuntimeError(
f"Command: {command}\nError:\n" f"return code: {exc.returncode}\n" f"output: {exc.output}"
) from exc
except OSError as exc: except OSError as exc:
raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc
finally: finally:
@ -128,19 +129,22 @@ class LocalShell(Shell):
end_time: datetime, end_time: datetime,
result: Optional[CommandResult], result: Optional[CommandResult],
) -> None: ) -> None:
if not result: # TODO: increase logging level if return code is non 0, should be warning at least
logger.warning(f"Command: {command}\n" f"Error: result is None") logger.info(
return f"Command: {command}\n"
f"{'Success:' if result and result.return_code == 0 else 'Error:'}\n"
status, log_method = ("Success", logger.info) if result.return_code == 0 else ("Error", logger.warning) f"return code: {result.return_code if result else ''} "
log_method(f"Command: {command}\n" f"{status} with retcode {result.return_code}\n" f"Output: \n{result.stdout}") f"\nOutput: {result.stdout if result else ''}"
elapsed_time = end_time - start_time
command_attachment = (
f"COMMAND: {command}\n"
f"RETCODE: {result.return_code}\n\n"
f"STDOUT:\n{result.stdout}\n"
f"STDERR:\n{result.stderr}\n"
f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}"
) )
reporter.attach(command_attachment, "Command execution.txt")
if result:
elapsed_time = end_time - start_time
command_attachment = (
f"COMMAND: {command}\n"
f"RETCODE: {result.return_code}\n\n"
f"STDOUT:\n{result.stdout}\n"
f"STDERR:\n{result.stderr}\n"
f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}"
)
with reporter.step(f"COMMAND: {command}"):
reporter.attach(command_attachment, "Command execution.txt")
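The MORE_LOG toggle above works because contextlib.nullcontext accepts (and ignores) the same positional argument as reporter.step; a self-contained sketch of the gating pattern:
# Standalone sketch of the step_context gating used by LocalShell.
import os
from contextlib import nullcontext

MORE_LOG = os.getenv("MORE_LOG", "1")

def step(title: str):  # stand-in for reporter.step
    print(f"[step] {title}")
    return nullcontext()

step_context = step if MORE_LOG == "1" else nullcontext

with step_context("Executing command: ls -la"):
    pass  # command execution would happen here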

View file

@ -200,6 +200,7 @@ def list_containers(wallet: WalletInfo, shell: Shell, endpoint: str, timeout: Op
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
result = cli.container.list(rpc_endpoint=endpoint, timeout=timeout) result = cli.container.list(rpc_endpoint=endpoint, timeout=timeout)
logger.info(f"Containers: \n{result}")
return result.stdout.split() return result.stdout.split()
@ -327,6 +328,13 @@ def _parse_cid(output: str) -> str:
return splitted[1] return splitted[1]
@reporter.step("Search container by name")
def search_container_by_name(name: str, node: ClusterNode):
resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", node.host.config.product)
resolver: BucketContainerResolver = resolver_cls()
return resolver.resolve(node, name)
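Typical call site: probe each cluster node until one resolves the bucket name to a container ID (the bucket name is illustrative), as search_nodes_with_bucket does further below:
# Illustrative usage of search_container_by_name across a cluster.
cid = None
for cluster_node in cluster.cluster_nodes:
    cid = search_container_by_name("my-bucket", cluster_node)
    if cid:
        break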
@reporter.step("Search for nodes with a container") @reporter.step("Search for nodes with a container")
def search_nodes_with_container( def search_nodes_with_container(
wallet: WalletInfo, wallet: WalletInfo,

View file

@ -13,10 +13,8 @@ from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing import wait_for_success
from frostfs_testlib.utils import json_utils from frostfs_testlib.utils import json_utils
from frostfs_testlib.utils.cli_utils import parse_netmap_output from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output
from frostfs_testlib.utils.file_utils import TestFile
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@ -82,7 +80,7 @@ def get_object(
no_progress: bool = True, no_progress: bool = True,
session: Optional[str] = None, session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> TestFile: ) -> str:
""" """
GET from FrostFS. GET from FrostFS.
@ -104,14 +102,14 @@ def get_object(
if not write_object: if not write_object:
write_object = str(uuid.uuid4()) write_object = str(uuid.uuid4())
test_file = TestFile(os.path.join(ASSETS_DIR, write_object)) file_path = os.path.join(ASSETS_DIR, write_object)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli.object.get( cli.object.get(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
cid=cid, cid=cid,
oid=oid, oid=oid,
file=test_file, file=file_path,
bearer=bearer, bearer=bearer,
no_progress=no_progress, no_progress=no_progress,
xhdr=xhdr, xhdr=xhdr,
@ -119,7 +117,7 @@ def get_object(
timeout=timeout, timeout=timeout,
) )
return test_file return file_path
@reporter.step("Get Range Hash from {endpoint}") @reporter.step("Get Range Hash from {endpoint}")
@ -358,7 +356,7 @@ def get_range(
Returns: Returns:
(str, bytes) - path to the file with range content and content of this file as bytes (str, bytes) - path to the file with range content and content of this file as bytes
""" """
test_file = TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4()))) range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4()))
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli.object.range( cli.object.range(
@ -366,16 +364,16 @@ def get_range(
cid=cid, cid=cid,
oid=oid, oid=oid,
range=range_cut, range=range_cut,
file=test_file, file=range_file_path,
bearer=bearer, bearer=bearer,
xhdr=xhdr, xhdr=xhdr,
session=session, session=session,
timeout=timeout, timeout=timeout,
) )
with open(test_file, "rb") as file: with open(range_file_path, "rb") as file:
content = file.read() content = file.read()
return test_file, content return range_file_path, content
@reporter.step("Lock Object") @reporter.step("Lock Object")
@ -616,27 +614,27 @@ def head_object(
fst_line_idx = result.stdout.find("\n") fst_line_idx = result.stdout.find("\n")
decoded = json.loads(result.stdout[fst_line_idx:]) decoded = json.loads(result.stdout[fst_line_idx:])
# If response is an EC object header, it has `chunks` key
if "chunks" in decoded.keys():
logger.info("decoding ec chunks")
return decoded["chunks"]
# If response is Complex Object header, it has `splitId` key # If response is Complex Object header, it has `splitId` key
if "splitId" in decoded.keys(): if "splitId" in decoded.keys():
logger.info("decoding split header")
return json_utils.decode_split_header(decoded) return json_utils.decode_split_header(decoded)
# If response is Last or Linking Object header, # If response is Last or Linking Object header,
# it has `header` dictionary and non-null `split` dictionary # it has `header` dictionary and non-null `split` dictionary
if "split" in decoded["header"].keys(): if "split" in decoded["header"].keys():
if decoded["header"]["split"]: if decoded["header"]["split"]:
logger.info("decoding linking object")
return json_utils.decode_linking_object(decoded) return json_utils.decode_linking_object(decoded)
if decoded["header"]["objectType"] == "STORAGE_GROUP": if decoded["header"]["objectType"] == "STORAGE_GROUP":
logger.info("decoding storage group")
return json_utils.decode_storage_group(decoded) return json_utils.decode_storage_group(decoded)
if decoded["header"]["objectType"] == "TOMBSTONE": if decoded["header"]["objectType"] == "TOMBSTONE":
logger.info("decoding tombstone")
return json_utils.decode_tombstone(decoded) return json_utils.decode_tombstone(decoded)
logger.info("decoding simple header")
return json_utils.decode_simple_header(decoded) return json_utils.decode_simple_header(decoded)
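Condensed, the dispatch order that the new branch introduces looks as follows (a sketch restating the logic above, using the same json_utils helpers):
# Sketch: header-shape dispatch in head_object, EC check first.
def decode_header(decoded: dict):
    if "chunks" in decoded:  # EC object header
        return decoded["chunks"]
    if "splitId" in decoded:  # complex object header
        return json_utils.decode_split_header(decoded)
    if decoded["header"].get("split"):  # last or linking object header
        return json_utils.decode_linking_object(decoded)
    if decoded["header"]["objectType"] == "STORAGE_GROUP":
        return json_utils.decode_storage_group(decoded)
    if decoded["header"]["objectType"] == "TOMBSTONE":
        return json_utils.decode_tombstone(decoded)
    return json_utils.decode_simple_header(decoded)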
@ -690,16 +688,13 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
latest_block = first_line.split(":") latest_block = first_line.split(":")
# taking second line from command's output, which contains the validated state # taking second line from command's output, which contains the validated state
second_line = output.split("\n")[1] second_line = output.split("\n")[1]
if second_line != "": validated_state = second_line.split(":")
validated_state = second_line.split(":") return {
return { latest_block[0].replace(":", ""): int(latest_block[1]),
latest_block[0].replace(":", ""): int(latest_block[1]), validated_state[0].replace(":", ""): int(validated_state[1]),
validated_state[0].replace(":", ""): int(validated_state[1]), }
}
return {latest_block[0].replace(":", ""): int(latest_block[1])}
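The guarded branch tolerates output without a second line; a standalone sketch of the parsing (the sample output string is illustrative):
# Sketch: parse `neo-go query height` output with an optional second line.
output = "Latest block: 123\nValidated state: 122\n"  # illustrative sample
first_line, second_line = output.split("\n")[0], output.split("\n")[1]
latest_block = first_line.split(":")
result = {latest_block[0].replace(":", ""): int(latest_block[1])}
if second_line != "":
    validated_state = second_line.split(":")
    result[validated_state[0].replace(":", "")] = int(validated_state[1])
# result == {"Latest block": 123, "Validated state": 122}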
@wait_for_success()
@reporter.step("Search object nodes") @reporter.step("Search object nodes")
def get_object_nodes( def get_object_nodes(
cluster: Cluster, cluster: Cluster,
@ -719,27 +714,21 @@ def get_object_nodes(
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config)
response = cli.object.nodes( result_object_nodes = cli.object.nodes(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
cid=cid, cid=cid,
oid=oid, oid=oid,
bearer=bearer, bearer=bearer,
ttl=1 if is_direct else None, ttl=1 if is_direct else None,
json=True,
xhdr=xhdr, xhdr=xhdr,
timeout=timeout, timeout=timeout,
verify_presence_all=verify_presence_all, verify_presence_all=verify_presence_all,
) )
response_json = json.loads(response.stdout) parsing_output = parse_cmd_table(result_object_nodes.stdout, "|")
# Currently, the command will show expected and confirmed nodes. list_object_nodes = [
# And we (currently) count only nodes which are both expected and confirmed node for node in parsing_output if node["should_contain_object"] == "true" and node["actually_contains_object"] == "true"
object_nodes_id = { ]
required_node
for data_object in response_json["data_objects"]
for required_node in data_object["required_nodes"]
if required_node in data_object["confirmed_nodes"]
}
netmap_nodes_list = parse_netmap_output( netmap_nodes_list = parse_netmap_output(
cli.netmap.snapshot( cli.netmap.snapshot(
@ -748,11 +737,14 @@ def get_object_nodes(
).stdout ).stdout
) )
netmap_nodes = [ netmap_nodes = [
netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id netmap_node
for object_node in list_object_nodes
for netmap_node in netmap_nodes_list
if object_node["node_id"] == netmap_node.node_id
] ]
object_nodes = [ result = [
cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip
] ]
return object_nodes return result
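The JSON-based variant keeps only nodes that are both required and confirmed; the set intersection, isolated (the response shape follows the data_objects keys used above):
# Sketch: nodes that are both expected and confirmed in `--json` output.
response_json = {
    "data_objects": [
        {"required_nodes": ["n1", "n2"], "confirmed_nodes": ["n2"]},
        {"required_nodes": ["n3"], "confirmed_nodes": ["n3"]},
    ]
}
object_nodes_id = {
    required_node
    for data_object in response_json["data_objects"]
    for required_node in data_object["required_nodes"]
    if required_node in data_object["confirmed_nodes"]
}
assert object_nodes_id == {"n2", "n3"}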

View file

@ -1,35 +0,0 @@
import logging
from typing import Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
logger = logging.getLogger("NeoLogger")
@reporter.step("Get Tree List")
def get_tree_list(
wallet: WalletInfo,
cid: str,
shell: Shell,
endpoint: str,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> None:
"""
A wrapper for `frostfs-cli tree list` call.
Args:
wallet (WalletInfo): wallet on whose behalf we request the tree list
cid (str): ID of the container whose tree is listed
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
timeout: Timeout for the operation.
This function doesn't return anything.
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli.tree.list(cid=cid, rpc_endpoint=endpoint, timeout=timeout)

View file

@ -12,7 +12,7 @@ import requests
from frostfs_testlib import reporter from frostfs_testlib import reporter
from frostfs_testlib.cli import GenericCli from frostfs_testlib.cli import GenericCli
from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE
from frostfs_testlib.s3.aws_cli_client import command_options from frostfs_testlib.s3.aws_cli_client import command_options
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.shell.local_shell import LocalShell from frostfs_testlib.shell.local_shell import LocalShell
@ -20,10 +20,11 @@ from frostfs_testlib.steps.cli.object import get_object
from frostfs_testlib.steps.storage_policy import get_nodes_without_object from frostfs_testlib.steps.storage_policy import get_nodes_without_object
from frostfs_testlib.storage.cluster import ClusterNode, StorageNode from frostfs_testlib.storage.cluster import ClusterNode, StorageNode
from frostfs_testlib.testing.test_control import retry from frostfs_testlib.testing.test_control import retry
from frostfs_testlib.utils.file_utils import TestFile, get_file_hash from frostfs_testlib.utils.file_utils import get_file_hash
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
local_shell = LocalShell() local_shell = LocalShell()
@ -49,7 +50,9 @@ def get_via_http_gate(
else: else:
request = f"{node.http_gate.get_endpoint()}{request_path}" request = f"{node.http_gate.get_endpoint()}{request_path}"
resp = requests.get(request, stream=True, timeout=timeout, verify=False) resp = requests.get(
request, headers={"Host": node.storage_node.get_http_hostname()[0]}, stream=True, timeout=timeout, verify=False
)
if not resp.ok: if not resp.ok:
raise Exception( raise Exception(
@ -63,10 +66,10 @@ def get_via_http_gate(
logger.info(f"Request: {request}") logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code) _attach_allure_step(request, resp.status_code)
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")) file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")
with open(test_file, "wb") as file: with open(file_path, "wb") as file:
shutil.copyfileobj(resp.raw, file) shutil.copyfileobj(resp.raw, file)
return test_file return file_path
@reporter.step("Get via Zip HTTP Gate") @reporter.step("Get via Zip HTTP Gate")
@ -92,11 +95,11 @@ def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Opt
logger.info(f"Request: {request}") logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code) _attach_allure_step(request, resp.status_code)
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip")) file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip")
with open(test_file, "wb") as file: with open(file_path, "wb") as file:
shutil.copyfileobj(resp.raw, file) shutil.copyfileobj(resp.raw, file)
with zipfile.ZipFile(test_file, "r") as zip_ref: with zipfile.ZipFile(file_path, "r") as zip_ref:
zip_ref.extractall(ASSETS_DIR) zip_ref.extractall(ASSETS_DIR)
return os.path.join(os.getcwd(), ASSETS_DIR, prefix) return os.path.join(os.getcwd(), ASSETS_DIR, prefix)
@ -115,6 +118,7 @@ def get_via_http_gate_by_attribute(
cid: CID to get object from cid: CID to get object from
attribute: attribute {name: attribute} value pair attribute: attribute {name: attribute} value pair
endpoint: http gate endpoint endpoint: http gate endpoint
http_hostname: http host name on the node
request_path: (optional) http request path, if omitted - use default [{endpoint}/get_by_attribute/{Key}/{Value}] request_path: (optional) http request path, if omitted - use default [{endpoint}/get_by_attribute/{Key}/{Value}]
""" """
attr_name = list(attribute.keys())[0] attr_name = list(attribute.keys())[0]
@ -125,7 +129,9 @@ def get_via_http_gate_by_attribute(
else: else:
request = f"{node.http_gate.get_endpoint()}{request_path}" request = f"{node.http_gate.get_endpoint()}{request_path}"
resp = requests.get(request, stream=True, timeout=timeout, verify=False) resp = requests.get(
request, stream=True, timeout=timeout, verify=False, headers={"Host": node.storage_node.get_http_hostname()[0]}
)
if not resp.ok: if not resp.ok:
raise Exception( raise Exception(
@ -139,14 +145,17 @@ def get_via_http_gate_by_attribute(
logger.info(f"Request: {request}") logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code) _attach_allure_step(request, resp.status_code)
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}")) file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}")
with open(test_file, "wb") as file: with open(file_path, "wb") as file:
shutil.copyfileobj(resp.raw, file) shutil.copyfileobj(resp.raw, file)
return test_file return file_path
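The variant that pins the HTTP Host header sends requests like the following sketch (URL and hostname are placeholders for the gate endpoint and node.storage_node.get_http_hostname()[0]):
# Sketch of the Host-header override used by the gate helpers.
import requests

request = "http://127.0.0.1:8080/get/CID/OID"  # placeholder URL
host_header = "s01.frostfs.devenv"  # placeholder hostname
resp = requests.get(request, stream=True, timeout=300, verify=False, headers={"Host": host_header})
resp.raise_for_status()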
# TODO: pass http_hostname as a header
@reporter.step("Upload via HTTP Gate") @reporter.step("Upload via HTTP Gate")
def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300) -> str: def upload_via_http_gate(
cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300
) -> str:
""" """
This function upload given object through HTTP gate This function upload given object through HTTP gate
cid: CID to get object from cid: CID to get object from
@ -189,6 +198,7 @@ def is_object_large(filepath: str) -> bool:
return False return False
# TODO: pass http_hostname as a header
@reporter.step("Upload via HTTP Gate using Curl") @reporter.step("Upload via HTTP Gate using Curl")
def upload_via_http_gate_curl( def upload_via_http_gate_curl(
cid: str, cid: str,
@ -238,7 +248,7 @@ def upload_via_http_gate_curl(
@retry(max_attempts=3, sleep_interval=1) @retry(max_attempts=3, sleep_interval=1)
@reporter.step("Get via HTTP Gate using Curl") @reporter.step("Get via HTTP Gate using Curl")
def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> TestFile: def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> str:
""" """
This function gets given object from HTTP gate using curl utility. This function gets given object from HTTP gate using curl utility.
cid: CID to get object from cid: CID to get object from
@ -246,12 +256,12 @@ def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> TestFile:
node: node for request node: node for request
""" """
request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}"
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")) file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")
curl = GenericCli("curl", node.host) curl = GenericCli("curl", node.host)
curl(f"-k ", f"{request} > {test_file}", shell=local_shell) curl(f'-k -H "Host: {node.storage_node.get_http_hostname()[0]}"', f"{request} > {file_path}", shell=local_shell)
return test_file return file_path
def _attach_allure_step(request: str, status_code: int, req_type="GET"): def _attach_allure_step(request: str, status_code: int, req_type="GET"):

View file

@ -1,45 +0,0 @@
import re
from frostfs_testlib import reporter
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.storage.cluster import ClusterNode
@reporter.step("Check metrics result")
@wait_for_success(interval=10)
def check_metrics_counter(
cluster_nodes: list[ClusterNode],
operator: str = "==",
counter_exp: int = 0,
parse_from_command: bool = False,
**metrics_greps: str,
):
counter_act = 0
for cluster_node in cluster_nodes:
counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps)
assert eval(
f"{counter_act} {operator} {counter_exp}"
), f"Expected: {counter_exp} {operator} Actual: {counter_act} in node: {cluster_node}"
@reporter.step("Get metrics value from node: {node}")
def get_metrics_value(node: ClusterNode, parse_from_command: bool = False, **metrics_greps: str):
try:
command_result = node.metrics.storage.get_metrics_search_by_greps(**metrics_greps)
if parse_from_command:
metrics_counter = calc_metrics_count_from_stdout(command_result.stdout, **metrics_greps)
else:
metrics_counter = calc_metrics_count_from_stdout(command_result.stdout)
except RuntimeError:
metrics_counter = 0
return metrics_counter
@reporter.step("Parse metrics count and calc sum of result")
def calc_metrics_count_from_stdout(metric_result_stdout: str, command: str = None):
if command:
result = re.findall(rf"{command}\s*([\d.e+-]+)", metric_result_stdout)
else:
result = re.findall(r"}\s*([\d.e+-]+)", metric_result_stdout)
return sum(map(lambda x: int(float(x)), result))
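Typical call site for these helpers, with an illustrative metric name (the keyword arguments are forwarded as greps to the node's metrics search):
# Illustrative usage: assert the summed metric across nodes meets expectation.
check_metrics_counter(
    cluster.cluster_nodes,
    operator=">=",
    counter_exp=1,
    command="frostfs_node_object_put_total",  # metric name is an assumption
)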

View file

@ -13,7 +13,6 @@ from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align
from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.cluster import Cluster, StorageNode
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import datetime_utils from frostfs_testlib.utils import datetime_utils
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@ -112,7 +111,10 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
storage_wallet_path = node.get_wallet_path() storage_wallet_path = node.get_wallet_path()
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config)
return cli.netmap.snapshot(rpc_endpoint=node.get_rpc_endpoint(), wallet=storage_wallet_path).stdout return cli.netmap.snapshot(
rpc_endpoint=node.get_rpc_endpoint(),
wallet=storage_wallet_path,
).stdout
@reporter.step("Get shard list for {node}") @reporter.step("Get shard list for {node}")
@ -200,7 +202,12 @@ def delete_node_data(node: StorageNode) -> None:
@reporter.step("Exclude node {node_to_exclude} from network map") @reporter.step("Exclude node {node_to_exclude} from network map")
def exclude_node_from_network_map(node_to_exclude: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None: def exclude_node_from_network_map(
node_to_exclude: StorageNode,
alive_node: StorageNode,
shell: Shell,
cluster: Cluster,
) -> None:
node_netmap_key = node_to_exclude.get_wallet_public_key() node_netmap_key = node_to_exclude.get_wallet_public_key()
storage_node_set_status(node_to_exclude, status="offline") storage_node_set_status(node_to_exclude, status="offline")
@ -214,7 +221,12 @@ def exclude_node_from_network_map(node_to_exclude: StorageNode, alive_node: Stor
@reporter.step("Include node {node_to_include} into network map") @reporter.step("Include node {node_to_include} into network map")
def include_node_to_network_map(node_to_include: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None: def include_node_to_network_map(
node_to_include: StorageNode,
alive_node: StorageNode,
shell: Shell,
cluster: Cluster,
) -> None:
storage_node_set_status(node_to_include, status="online") storage_node_set_status(node_to_include, status="online")
# Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch. # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch.
@ -224,7 +236,7 @@ def include_node_to_network_map(node_to_include: StorageNode, alive_node: Storag
tick_epoch(shell, cluster) tick_epoch(shell, cluster)
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2) time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
await_node_in_map(node_to_include, shell, alive_node) check_node_in_map(node_to_include, shell, alive_node)
@reporter.step("Check node {node} in network map") @reporter.step("Check node {node} in network map")
@ -238,11 +250,6 @@ def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[Stor
assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map" assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map"
@wait_for_success(300, 15, title="Await node {node} in network map")
def await_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
check_node_in_map(node, shell, alive_node)
@reporter.step("Check node {node} NOT in network map") @reporter.step("Check node {node} NOT in network map")
def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
alive_node = alive_node or node alive_node = alive_node or node
@ -269,7 +276,12 @@ def wait_for_node_to_be_ready(node: StorageNode) -> None:
@reporter.step("Remove nodes from network map trough cli-adm morph command") @reporter.step("Remove nodes from network map trough cli-adm morph command")
def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: list[StorageNode], alive_node: Optional[StorageNode] = None): def remove_nodes_from_map_morph(
shell: Shell,
cluster: Cluster,
remove_nodes: list[StorageNode],
alive_node: Optional[StorageNode] = None,
):
""" """
Move node to the Offline state in the candidates list and tick an epoch to update the netmap Move node to the Offline state in the candidates list and tick an epoch to update the netmap
using frostfs-adm using frostfs-adm
@ -288,5 +300,9 @@ def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: li
if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH: if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
# If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests) # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
frostfsadm = FrostfsAdm(shell=remote_shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) frostfsadm = FrostfsAdm(
shell=remote_shell,
frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
config_file=FROSTFS_ADM_CONFIG_PATH,
)
frostfsadm.morph.remove_nodes(node_netmap_keys) frostfsadm.morph.remove_nodes(node_netmap_keys)

View file

@ -7,9 +7,8 @@ from dateutil.parser import parse
from frostfs_testlib import reporter from frostfs_testlib import reporter
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import search_nodes_with_container from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container
from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
@ -48,6 +47,7 @@ def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: Versi
if status == VersioningStatus.UNDEFINED: if status == VersioningStatus.UNDEFINED:
return return
s3_client.get_bucket_versioning_status(bucket)
s3_client.put_bucket_versioning(bucket, status=status) s3_client.put_bucket_versioning(bucket, status=status)
bucket_status = s3_client.get_bucket_versioning_status(bucket) bucket_status = s3_client.get_bucket_versioning_status(bucket)
assert bucket_status == status.value, f"Expected {bucket_status} status. Got {status.value}" assert bucket_status == status.value, f"Expected {bucket_status} status. Got {status.value}"
@ -120,28 +120,32 @@ def assert_object_lock_mode(
).days == retain_period, f"Expected retention period is {retain_period} days" ).days == retain_period, f"Expected retention period is {retain_period} days"
def _format_grants_as_strings(grants: list[dict]) -> set: def assert_s3_acl(acl_grants: list, permitted_users: str):
grantee_format = "{g_type}::{uri}:{permission}" if permitted_users == "AllUsers":
return set( grantees = {"AllUsers": 0, "CanonicalUser": 0}
[ for acl_grant in acl_grants:
grantee_format.format( if acl_grant.get("Grantee", {}).get("Type") == "Group":
g_type=grant.get("Grantee", {}).get("Type", ""), uri = acl_grant.get("Grantee", {}).get("URI")
uri=grant.get("Grantee", {}).get("URI", ""), permission = acl_grant.get("Permission")
permission=grant.get("Permission", ""), assert (uri, permission) == (
) "http://acs.amazonaws.com/groups/global/AllUsers",
for grant in grants "FULL_CONTROL",
] ), "All Groups should have FULL_CONTROL"
) grantees["AllUsers"] += 1
if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
permission = acl_grant.get("Permission")
assert permission == "FULL_CONTROL", "Canonical User should have FULL_CONTROL"
grantees["CanonicalUser"] += 1
assert grantees["AllUsers"] >= 1, "All Users should have FULL_CONTROL"
assert grantees["CanonicalUser"] >= 1, "Canonical User should have FULL_CONTROL"
if permitted_users == "CanonicalUser":
@reporter.step("Verify ACL permissions") for acl_grant in acl_grants:
def verify_acl_permissions(actual_acl_grants: list[dict], expected_acl_grants: list[dict], strict: bool = True): if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
actual_grants = _format_grants_as_strings(actual_acl_grants) permission = acl_grant.get("Permission")
expected_grants = _format_grants_as_strings(expected_acl_grants) assert permission == "FULL_CONTROL", "Only CanonicalUser should have FULL_CONTROL"
else:
assert expected_grants <= actual_grants, "Permissions mismatch" logger.error("FULL_CONTROL is given to All Users")
if strict:
assert expected_grants == actual_grants, "Extra permissions found, must not be there"
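A hedged example of the new grant comparison (the grant dict follows the AWS ACL shape referenced above):
# Illustrative check: expected grants must match the formatted actual grants.
grant = {
    "Grantee": {"Type": "Group", "URI": "http://acs.amazonaws.com/groups/global/AllUsers"},
    "Permission": "READ",
}
verify_acl_permissions([grant], [grant], strict=True)  # formatted grant sets match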
@reporter.step("Delete bucket with all objects") @reporter.step("Delete bucket with all objects")
@ -176,11 +180,10 @@ def search_nodes_with_bucket(
wallet: WalletInfo, wallet: WalletInfo,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
bucket_container_resolver: BucketContainerResolver,
) -> list[ClusterNode]: ) -> list[ClusterNode]:
cid = None cid = None
for cluster_node in cluster.cluster_nodes: for cluster_node in cluster.cluster_nodes:
cid = bucket_container_resolver.resolve(cluster_node, bucket_name) cid = search_container_by_name(name=bucket_name, node=cluster_node)
if cid: if cid:
break break
nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster)

View file

@ -14,7 +14,6 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, Inner
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
from frostfs_testlib.storage.service_registry import ServiceRegistry from frostfs_testlib.storage.service_registry import ServiceRegistry
from frostfs_testlib.storage.dataclasses.metrics import Metrics
class ClusterNode: class ClusterNode:
@ -25,13 +24,11 @@ class ClusterNode:
class_registry: ServiceRegistry class_registry: ServiceRegistry
id: int id: int
host: Host host: Host
metrics: Metrics
def __init__(self, host: Host, id: int) -> None: def __init__(self, host: Host, id: int) -> None:
self.host = host self.host = host
self.id = id self.id = id
self.class_registry = get_service_registry() self.class_registry = get_service_registry()
self.metrics = Metrics(host=self.host, metrics_endpoint=self.storage_node.get_metrics_endpoint())
@property @property
def host_ip(self): def host_ip(self):
@ -144,16 +141,30 @@ class ClusterNode:
return self.host.config.interfaces[interface.value] return self.host.config.interfaces[interface.value]
def get_data_interfaces(self) -> list[str]: def get_data_interfaces(self) -> list[str]:
return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface] return [
ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface
]
def get_data_interface(self, search_interface: str) -> list[str]: def get_data_interface(self, search_interface: str) -> list[str]:
return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_interface == interface] return [
self.host.config.interfaces[interface]
for interface in self.host.config.interfaces.keys()
if search_interface == interface
]
def get_internal_interfaces(self) -> list[str]: def get_internal_interfaces(self) -> list[str]:
return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "internal" in name_interface] return [
ip_address
for name_interface, ip_address in self.host.config.interfaces.items()
if "internal" in name_interface
]
def get_internal_interface(self, search_internal: str) -> list[str]: def get_internal_interface(self, search_internal: str) -> list[str]:
return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_internal == interface] return [
self.host.config.interfaces[interface]
for interface in self.host.config.interfaces.keys()
if search_internal == interface
]
class Cluster: class Cluster:
@ -164,6 +175,8 @@ class Cluster:
default_rpc_endpoint: str default_rpc_endpoint: str
default_s3_gate_endpoint: str default_s3_gate_endpoint: str
default_http_gate_endpoint: str default_http_gate_endpoint: str
default_http_hostname: str
default_s3_hostname: str
def __init__(self, hosting: Hosting) -> None: def __init__(self, hosting: Hosting) -> None:
self._hosting = hosting self._hosting = hosting
@ -172,6 +185,8 @@ class Cluster:
self.default_rpc_endpoint = self.services(StorageNode)[0].get_rpc_endpoint() self.default_rpc_endpoint = self.services(StorageNode)[0].get_rpc_endpoint()
self.default_s3_gate_endpoint = self.services(S3Gate)[0].get_endpoint() self.default_s3_gate_endpoint = self.services(S3Gate)[0].get_endpoint()
self.default_http_gate_endpoint = self.services(HTTPGate)[0].get_endpoint() self.default_http_gate_endpoint = self.services(HTTPGate)[0].get_endpoint()
self.default_http_hostname = self.services(StorageNode)[0].get_http_hostname()
self.default_s3_hostname = self.services(StorageNode)[0].get_s3_hostname()
@property @property
def hosts(self) -> list[Host]: def hosts(self) -> list[Host]:

View file

@ -12,15 +12,9 @@ class ConfigAttributes:
REMOTE_WALLET_CONFIG = "remote_wallet_config_path" REMOTE_WALLET_CONFIG = "remote_wallet_config_path"
ENDPOINT_DATA_0 = "endpoint_data0" ENDPOINT_DATA_0 = "endpoint_data0"
ENDPOINT_DATA_1 = "endpoint_data1" ENDPOINT_DATA_1 = "endpoint_data1"
ENDPOINT_DATA_0_NS = "endpoint_data0_namespace"
ENDPOINT_INTERNAL = "endpoint_internal0" ENDPOINT_INTERNAL = "endpoint_internal0"
ENDPOINT_PROMETHEUS = "endpoint_prometheus" ENDPOINT_PROMETHEUS = "endpoint_prometheus"
CONTROL_ENDPOINT = "control_endpoint" CONTROL_ENDPOINT = "control_endpoint"
UN_LOCODE = "un_locode" UN_LOCODE = "un_locode"
HTTP_HOSTNAME = "http_hostname"
S3_HOSTNAME = "s3_hostname"
class PlacementRule:
DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X"
REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X"
DEFAULT_EC_PLACEMENT_RULE = "EC 3.1"

View file

@ -14,7 +14,6 @@ from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_E
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider
from frostfs_testlib.steps.network import IpHelper from frostfs_testlib.steps.network import IpHelper
from frostfs_testlib.steps.node_management import include_node_to_network_map, remove_nodes_from_map_morph
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode
from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.controllers.disk_controller import DiskController
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
@ -40,7 +39,6 @@ class ClusterStateController:
self.stopped_nodes: list[ClusterNode] = [] self.stopped_nodes: list[ClusterNode] = []
self.detached_disks: dict[str, DiskController] = {} self.detached_disks: dict[str, DiskController] = {}
self.dropped_traffic: list[ClusterNode] = [] self.dropped_traffic: list[ClusterNode] = []
self.excluded_from_netmap: list[StorageNode] = []
self.stopped_services: set[NodeBase] = set() self.stopped_services: set[NodeBase] = set()
self.cluster = cluster self.cluster = cluster
self.healthcheck = healthcheck self.healthcheck = healthcheck
@ -309,17 +307,24 @@ class ClusterStateController:
self.suspended_services = {} self.suspended_services = {}
@reporter.step("Drop traffic to {node}, nodes - {block_nodes}") @reporter.step("Drop traffic to {node}, nodes - {block_nodes}")
def drop_traffic(self, node: ClusterNode, wakeup_timeout: int, name_interface: str, block_nodes: list[ClusterNode] = None) -> None: def drop_traffic(
self,
node: ClusterNode,
wakeup_timeout: int,
name_interface: str,
block_nodes: list[ClusterNode] = None,
) -> None:
list_ip = self._parse_interfaces(block_nodes, name_interface) list_ip = self._parse_interfaces(block_nodes, name_interface)
IpHelper.drop_input_traffic_to_node(node, list_ip) IpHelper.drop_input_traffic_to_node(node, list_ip)
time.sleep(wakeup_timeout) time.sleep(wakeup_timeout)
self.dropped_traffic.append(node) self.dropped_traffic.append(node)
@reporter.step("Start traffic to {node}") @reporter.step("Start traffic to {node}")
def restore_traffic(self, node: ClusterNode) -> None: def restore_traffic(
self,
node: ClusterNode,
) -> None:
IpHelper.restore_input_traffic_to_node(node=node) IpHelper.restore_input_traffic_to_node(node=node)
index = self.dropped_traffic.index(node)
self.dropped_traffic.pop(index)
@reporter.step("Restore blocked nodes") @reporter.step("Restore blocked nodes")
def restore_all_traffic(self): def restore_all_traffic(self):
@ -403,7 +408,9 @@ class ClusterStateController:
@reporter.step("Set MaintenanceModeAllowed - {status}") @reporter.step("Set MaintenanceModeAllowed - {status}")
def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None:
frostfs_adm = FrostfsAdm( frostfs_adm = FrostfsAdm(
shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH shell=cluster_node.host.get_shell(),
frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
config_file=FROSTFS_ADM_CONFIG_PATH,
) )
frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}") frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}")
@ -425,44 +432,22 @@ class ClusterStateController:
if not await_tick: if not await_tick:
return return
with reporter.step("Tick 2 epoch with 2 block await."): with reporter.step("Tick 1 epoch and await 2 block"):
for _ in range(2): frostfs_adm.morph.force_new_epoch()
frostfs_adm.morph.force_new_epoch() time.sleep(parse_time(MORPH_BLOCK_TIME) * 2)
time.sleep(parse_time(MORPH_BLOCK_TIME) * 2)
self.await_node_status(status, wallet, cluster_node) self.await_node_status(status, wallet, cluster_node)
@wait_for_success(80, 8, title="Wait for node status become {status}") @wait_for_success(80, 8, title="Wait for node status become {status}")
def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode, checker_node: ClusterNode = None): def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode):
frostfs_cli = FrostfsCli(self.shell, FROSTFS_CLI_EXEC, wallet.config_path) frostfs_cli = FrostfsCli(self.shell, FROSTFS_CLI_EXEC, wallet.config_path)
if not checker_node: netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(cluster_node.storage_node.get_rpc_endpoint()).stdout)
checker_node = cluster_node
netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(checker_node.storage_node.get_rpc_endpoint()).stdout)
netmap = [node for node in netmap if cluster_node.host_ip == node.node] netmap = [node for node in netmap if cluster_node.host_ip == node.node]
if status == NodeStatus.OFFLINE: if status == NodeStatus.OFFLINE:
assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline" assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline"
else: else:
assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'" assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'"
def remove_node_from_netmap(self, removes_nodes: list[StorageNode]) -> None:
alive_storage = list(set(self.cluster.storage_nodes) - set(removes_nodes))[0]
remove_nodes_from_map_morph(self.shell, self.cluster, removes_nodes, alive_storage)
self.excluded_from_netmap.extend(removes_nodes)
def include_node_to_netmap(self, include_node: StorageNode, alive_node: StorageNode):
include_node_to_network_map(include_node, alive_node, self.shell, self.cluster)
self.excluded_from_netmap.pop(self.excluded_from_netmap.index(include_node))
def include_all_excluded_nodes(self):
if not self.excluded_from_netmap:
return
alive_node = list(set(self.cluster.storage_nodes) - set(self.excluded_from_netmap))[0]
if not alive_node:
return
for exclude_node in self.excluded_from_netmap.copy():
self.include_node_to_netmap(exclude_node, alive_node)
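Sketch of how a test might drive the new netmap helpers on the controller (node selection is illustrative):
# Illustrative flow: exclude one storage node from the netmap, then restore all.
node_to_remove = cluster_state_controller.cluster.storage_nodes[0]
cluster_state_controller.remove_node_from_netmap([node_to_remove])
# ... assertions against the shrunken netmap would go here ...
cluster_state_controller.include_all_excluded_nodes()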
def _get_cli( def _get_cli(
self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode
) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]: ) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]:
@ -479,7 +464,11 @@ class ClusterStateController:
frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH)
frostfs_cli = FrostfsCli(local_shell, FROSTFS_CLI_EXEC, local_wallet.config_path) frostfs_cli = FrostfsCli(local_shell, FROSTFS_CLI_EXEC, local_wallet.config_path)
frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path) frostfs_cli_remote = FrostfsCli(
shell=shell,
frostfs_cli_exec_path=FROSTFS_CLI_EXEC,
config_file=wallet_config_path,
)
return frostfs_adm, frostfs_cli, frostfs_cli_remote return frostfs_adm, frostfs_cli, frostfs_cli_remote
def _enable_date_synchronizer(self, cluster_node: ClusterNode): def _enable_date_synchronizer(self, cluster_node: ClusterNode):
@ -539,8 +528,3 @@ class ClusterStateController:
except Exception as err: except Exception as err:
logger.warning(f"Host ping fails with error {err}") logger.warning(f"Host ping fails with error {err}")
return HostStatus.ONLINE return HostStatus.ONLINE
@reporter.step("Get contract by domain - {domain_name}")
def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str):
frostfs_adm = FrostfsAdm(shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC)
return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_http_endpoint(), domain_name).stdout
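For context, a sketch of how the removed netmap helpers are typically combined in a test; the state_controller and cluster fixtures and the test body are assumptions, not part of this diff:

# Hypothetical usage with pytest-style fixtures providing the controller and cluster.
node = cluster.cluster_nodes[0]
state_controller.remove_node_from_netmap([node.storage_node])
try:
    pass  # assertions against the shrunken netmap would go here
finally:
    # Re-adds every excluded node, picking any remaining storage node as the alive peer.
    state_controller.include_all_excluded_nodes()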

View file

@@ -2,22 +2,22 @@ import json
from typing import Any

from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards
-from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.testing.test_control import wait_for_success


class ShardsWatcher:
+    shards_snapshots: list[dict[str, Any]] = []

    def __init__(self, node_under_test: ClusterNode) -> None:
-        self.shards_snapshots: list[dict[str, Any]] = []
        self.storage_node = node_under_test.storage_node
        self.take_shards_snapshot()

-    def take_shards_snapshot(self) -> None:
+    def take_shards_snapshot(self):
        snapshot = self.get_shards_snapshot()
        self.shards_snapshots.append(snapshot)

-    def get_shards_snapshot(self) -> dict[str, Any]:
+    def get_shards_snapshot(self):
        shards_snapshot: dict[str, Any] = {}

        shards = self.get_shards()

@@ -26,17 +26,17 @@ class ShardsWatcher:
        return shards_snapshot

-    def _get_current_snapshot(self) -> dict[str, Any]:
+    def _get_current_snapshot(self):
        return self.shards_snapshots[-1]

-    def _get_previous_snapshot(self) -> dict[str, Any]:
+    def _get_previous_snapshot(self):
        return self.shards_snapshots[-2]

-    def _is_shard_present(self, shard_id) -> bool:
+    def _is_shard_present(self, shard_id):
        snapshot = self._get_current_snapshot()
        return shard_id in snapshot

-    def get_shards_with_new_errors(self) -> dict[str, Any]:
+    def get_shards_with_new_errors(self):
        current_snapshot = self._get_current_snapshot()
        previous_snapshot = self._get_previous_snapshot()
        shards_with_new_errors: dict[str, Any] = {}

@@ -46,7 +46,7 @@ class ShardsWatcher:
        return shards_with_new_errors

-    def get_shards_with_errors(self) -> dict[str, Any]:
+    def get_shards_with_errors(self):
        snapshot = self.get_shards_snapshot()
        shards_with_errors: dict[str, Any] = {}
        for shard_id, shard in snapshot.items():

@@ -55,7 +55,7 @@ class ShardsWatcher:
        return shards_with_errors

-    def get_shard_status(self, shard_id: str):  # -> Any:
+    def get_shard_status(self, shard_id: str):
        snapshot = self.get_shards_snapshot()
        assert shard_id in snapshot, f"Shard {shard_id} is missing: {snapshot}"

@@ -63,18 +63,18 @@ class ShardsWatcher:
        return snapshot[shard_id]["mode"]

    @wait_for_success(60, 2)
-    def await_for_all_shards_status(self, status: str) -> None:
+    def await_for_all_shards_status(self, status: str):
        snapshot = self.get_shards_snapshot()
        for shard_id in snapshot:
            assert snapshot[shard_id]["mode"] == status, f"Shard {shard_id} have wrong shard status"

    @wait_for_success(60, 2)
-    def await_for_shard_status(self, shard_id: str, status: str) -> None:
+    def await_for_shard_status(self, shard_id: str, status: str):
        assert self.get_shard_status(shard_id) == status

    @wait_for_success(60, 2)
-    def await_for_shard_have_new_errors(self, shard_id: str) -> None:
+    def await_for_shard_have_new_errors(self, shard_id: str):
        self.take_shards_snapshot()
        assert self._is_shard_present(shard_id)
        shards_with_new_errors = self.get_shards_with_new_errors()

@@ -82,7 +82,7 @@ class ShardsWatcher:
        assert shard_id in shards_with_new_errors, f"Expected shard {shard_id} to have new errors, but haven't {self.shards_snapshots[-1]}"

    @wait_for_success(300, 5)
-    def await_for_shards_have_no_new_errors(self) -> None:
+    def await_for_shards_have_no_new_errors(self):
        self.take_shards_snapshot()
        shards_with_new_errors = self.get_shards_with_new_errors()
        assert len(shards_with_new_errors) == 0

@@ -102,7 +102,7 @@ class ShardsWatcher:
        return json.loads(response.stdout.split(">", 1)[1])

-    def set_shard_mode(self, shard_id: str, mode: str, clear_errors: bool = True) -> CommandResult:
+    def set_shard_mode(self, shard_id: str, mode: str, clear_errors: bool = True):
        shards_cli = FrostfsCliShards(
            self.storage_node.host.get_shell(),
            self.storage_node.host.get_cli_config("frostfs-cli").exec_path,
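A minimal usage sketch for the watcher above; the node_under_test fixture and the "read-only"/"READ_ONLY" mode strings are assumptions, not taken from this diff:

watcher = ShardsWatcher(node_under_test)           # takes an initial snapshot
shard_id = list(watcher.get_shards_snapshot())[0]  # pick any shard id
watcher.set_shard_mode(shard_id, mode="read-only")
watcher.await_for_shard_status(shard_id, "READ_ONLY")
watcher.await_for_shards_have_no_new_errors()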

View file

@@ -1,125 +0,0 @@
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Optional
from frostfs_testlib.testing.readable import HumanReadableEnum
from frostfs_testlib.utils import string_utils
logger = logging.getLogger("NeoLogger")
EACL_LIFETIME = 100500
FROSTFS_CONTRACT_CACHE_TIMEOUT = 30
class ObjectOperations(HumanReadableEnum):
PUT = "object.put"
GET = "object.get"
HEAD = "object.head"
GET_RANGE = "object.range"
GET_RANGE_HASH = "object.hash"
SEARCH = "object.search"
DELETE = "object.delete"
WILDCARD_ALL = "object.*"
@staticmethod
def get_all():
return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL]
class Verb(HumanReadableEnum):
ALLOW = "allow"
DENY = "deny"
class Role(HumanReadableEnum):
OWNER = "owner"
IR = "ir"
CONTAINER = "container"
OTHERS = "others"
class ConditionType(HumanReadableEnum):
RESOURCE = "ResourceCondition"
REQUEST = "RequestCondition"
# See https://git.frostfs.info/TrueCloudLab/policy-engine/src/branch/master/schema/native/consts.go#L40-L53
class ConditionKey(HumanReadableEnum):
ROLE = '"\\$Actor:role"'
PUBLIC_KEY = '"\\$Actor:publicKey"'
OBJECT_TYPE = '"\\$Object:objectType"'
OBJECT_ID = '"\\$Object:objectID"'
class MatchType(HumanReadableEnum):
EQUAL = "="
NOT_EQUAL = "!="
@dataclass
class Condition:
condition_key: ConditionKey | str
condition_value: str
condition_type: ConditionType = ConditionType.REQUEST
match_type: MatchType = MatchType.EQUAL
def as_string(self):
key = self.condition_key.value if isinstance(self.condition_key, ConditionKey) else self.condition_key
value = self.condition_value.value if isinstance(self.condition_value, Enum) else self.condition_value
return f"{self.condition_type.value}:{key}{self.match_type.value}{value}"
@staticmethod
def by_role(*args, **kwargs) -> "Condition":
return Condition(ConditionKey.ROLE, *args, **kwargs)
@staticmethod
def by_key(*args, **kwargs) -> "Condition":
return Condition(ConditionKey.PUBLIC_KEY, *args, **kwargs)
@staticmethod
def by_object_type(*args, **kwargs) -> "Condition":
return Condition(ConditionKey.OBJECT_TYPE, *args, **kwargs)
@staticmethod
def by_object_id(*args, **kwargs) -> "Condition":
return Condition(ConditionKey.OBJECT_ID, *args, **kwargs)
class Rule:
def __init__(
self,
access: Verb,
operations: list[ObjectOperations] | ObjectOperations,
conditions: list[Condition] | Condition = None,
chain_id: Optional[str] = None,
) -> None:
self.access = access
self.operations = operations
if not conditions:
self.conditions = []
elif isinstance(conditions, Condition):
self.conditions = [conditions]
else:
self.conditions = conditions
if not isinstance(self.conditions, list):
raise RuntimeError("Conditions must be a list")
if not operations:
self.operations = []
elif isinstance(operations, ObjectOperations):
self.operations = [operations]
else:
self.operations = operations
if not isinstance(self.operations, list):
raise RuntimeError("Operations must be a list")
self.chain_id = chain_id if chain_id else string_utils.unique_name("chain-id-")
def as_string(self):
conditions = " ".join([cond.as_string() for cond in self.conditions])
operations = " ".join([op.value for op in self.operations])
return f"{self.access.value} {operations} {conditions} *"

View file

@@ -39,18 +39,12 @@ class S3Gate(NodeBase):
    def get_endpoint(self) -> str:
        return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0)

-    def get_ns_endpoint(self, ns_name: str) -> str:
-        return self._get_attribute(f"{ConfigAttributes.ENDPOINT_DATA_0}_namespace").format(namespace=ns_name)

    def get_all_endpoints(self) -> list[str]:
        return [
            self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0),
            self._get_attribute(ConfigAttributes.ENDPOINT_DATA_1),
        ]

+    def get_ns_endpoint(self, ns_name: str) -> str:
+        return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0_NS).format(namespace=ns_name)

    def service_healthcheck(self) -> bool:
        health_metric = "frostfs_s3_gw_state_health"
        output = self.host.get_shell().exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d").stdout

@@ -160,6 +154,15 @@ class StorageNode(NodeBase):
    def get_data_directory(self) -> str:
        return self.host.get_data_directory(self.name)

+    def get_storage_config(self) -> str:
+        return self.host.get_storage_config(self.name)

+    def get_http_hostname(self) -> list[str]:
+        return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME)

+    def get_s3_hostname(self) -> list[str]:
+        return self._get_attribute(ConfigAttributes.S3_HOSTNAME)

    def delete_blobovnicza(self):
        self.host.delete_blobovnicza(self.name)

View file

@@ -1,36 +0,0 @@
from frostfs_testlib.hosting import Host
from frostfs_testlib.shell.interfaces import CommandResult
class Metrics:
def __init__(self, host: Host, metrics_endpoint: str) -> None:
self.storage = StorageMetrics(host, metrics_endpoint)
class StorageMetrics:
"""
Class represents storage metrics in a cluster
"""
def __init__(self, host: Host, metrics_endpoint: str) -> None:
self.host = host
self.metrics_endpoint = metrics_endpoint
def get_metrics_search_by_greps(self, **greps) -> CommandResult:
"""
Get metrics matching the given greps, e.g. by cid, metric_type or shard_id.
Args:
greps: dict of grep-command-name and value,
for example get_metrics_search_by_greps(command='container_objects_total', cid='123456')
Returns:
result of the metrics command
"""
shell = self.host.get_shell()
additional_greps = " |grep ".join([grep_command for grep_command in greps.values()])
result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}")
return result
def get_all_metrics(self) -> CommandResult:
shell = self.host.get_shell()
result = shell.exec(f"curl -s {self.metrics_endpoint}")
return result
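A short sketch of how the grep chaining above composes into a shell pipeline; host, endpoint and cid are illustrative placeholders:

metrics = StorageMetrics(host, "localhost:9090")
result = metrics.get_metrics_search_by_greps(command="container_objects_total", cid="someCid")
# executes: curl -s localhost:9090 | grep container_objects_total |grep someCid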

View file

@@ -56,7 +56,9 @@ class Shard:
        var_prefix = f"{SHARD_PREFIX}{shard_id}"

        blobstor_count = Shard._get_blobstor_count_from_section(config_object, shard_id)
-        blobstors = [Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count)]
+        blobstors = [
+            Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count)
+        ]

        write_cache_enabled = config_object.as_bool(f"{var_prefix}_WRITECACHE_ENABLED")

@@ -69,13 +71,7 @@ class Shard:
    @staticmethod
    def from_object(shard):
        metabase = shard["metabase"]["path"] if "path" in shard["metabase"] else shard["metabase"]
-        writecache_enabled = True
-        if "enabled" in shard["writecache"]:
-            writecache_enabled = shard["writecache"]["enabled"]

        writecache = shard["writecache"]["path"] if "path" in shard["writecache"] else shard["writecache"]
-        if not writecache_enabled:
-            writecache = ""

        # Currently due to issue we need to check if pilorama exists in keys
        # TODO: make pilorama mandatory after fix
View file

@@ -70,26 +70,8 @@ class NodeNetInfo:
    epoch_duration: str = None
    inner_ring_candidate_fee: str = None
    maximum_object_size: str = None
-    maximum_count_of_data_shards: str = None
-    maximum_count_of_parity_shards: str = None
    withdrawal_fee: str = None
    homomorphic_hashing_disabled: str = None
    maintenance_mode_allowed: str = None
    eigen_trust_alpha: str = None
    eigen_trust_iterations: str = None

-@dataclass
-class Chunk:
-    def __init__(self, object_id: str, required_nodes: list, confirmed_nodes: list, ec_parent_object_id: str, ec_index: int) -> None:
-        self.object_id = object_id
-        self.required_nodes = required_nodes
-        self.confirmed_nodes = confirmed_nodes
-        self.ec_parent_object_id = ec_parent_object_id
-        self.ec_index = ec_index

-    def __str__(self) -> str:
-        return self.object_id

-    def __repr__(self) -> str:
-        return self.object_id

View file

@@ -1,14 +0,0 @@
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.storage.grpc_operations import interfaces
from frostfs_testlib.storage.grpc_operations.implementations import container, object
class CliClientWrapper(interfaces.GrpcClientWrapper):
def __init__(self, cli: FrostfsCli) -> None:
self.cli = cli
self.object: interfaces.ObjectInterface = object.ObjectOperations(self.cli)
self.container: interfaces.ContainerInterface = container.ContainerOperations(self.cli)
class RpcClientWrapper(interfaces.GrpcClientWrapper):
pass # The next series
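A sketch of wiring the wrapper; the shell object, config path, endpoint and placement policy are placeholders, not values from this diff:

cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, "/path/to/cli-config.yml")
client = CliClientWrapper(cli)
# Typed facades instead of raw CLI calls:
cid = client.container.create(endpoint="localhost:8080", policy="REP 2", await_mode=True)
oid = client.object.put(path="/tmp/payload.bin", cid=cid, endpoint="localhost:8080")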

View file

@@ -1,165 +0,0 @@
import json
from typing import Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher
from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo
from frostfs_testlib.storage.grpc_operations import interfaces
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.cli_utils import parse_netmap_output
class ChunksOperations(interfaces.ChunksInterface):
def __init__(self, cli: FrostfsCli) -> None:
self.cli = cli
@reporter.step("Search node without chunks")
def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]:
if not endpoint:
endpoint = cluster.default_rpc_endpoint
netmap = parse_netmap_output(self.cli.netmap.snapshot(endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout)
chunks_node_key = []
for chunk in chunks:
chunks_node_key.extend(chunk.confirmed_nodes)
for node_info in netmap.copy():
if node_info.node_id in chunks_node_key and node_info in netmap:
netmap.remove(node_info)
result = []
for node_info in netmap:
for cluster_node in cluster.cluster_nodes:
if node_info.node == cluster_node.host_ip:
result.append(cluster_node)
return result
@reporter.step("Search node with chunk {chunk}")
def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]:
netmap = parse_netmap_output(self.cli.netmap.snapshot(cluster.default_rpc_endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout)
for node_info in netmap:
if node_info.node_id in chunk.confirmed_nodes:
for cluster_node in cluster.cluster_nodes:
if cluster_node.host_ip == node_info.node:
return (cluster_node, node_info)
@wait_for_success(300, 5, fail_testcase=None)
@reporter.step("Search shard with chunk {chunk}")
def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str:
oid_path = f"{chunk.object_id[0]}/{chunk.object_id[1]}/{chunk.object_id[2]}/{chunk.object_id[3]}"
node_shell = node.storage_node.host.get_shell()
shards_watcher = ShardsWatcher(node)
with reporter.step("Search object file"):
for shard_id, shard_info in shards_watcher.shards_snapshots[-1].items():
check_dir = node_shell.exec(f" [ -d {shard_info['blobstor'][1]['path']}/{oid_path} ] && echo 1 || echo 0").stdout
if "1" in check_dir.strip():
return shard_id
@reporter.step("Get all chunks")
def get_all(
self,
rpc_endpoint: str,
cid: str,
oid: str,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = True,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> list[Chunk]:
object_nodes = self.cli.object.nodes(
rpc_endpoint=rpc_endpoint,
cid=cid,
address=address,
bearer=bearer,
generate_key=generate_key,
oid=oid,
trace=trace,
root=root,
verify_presence_all=verify_presence_all,
json=json,
ttl=ttl,
xhdr=xhdr,
timeout=timeout,
)
return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])
@reporter.step("Get last parity chunk")
def get_parity(
self,
rpc_endpoint: str,
cid: str,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = True,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> Chunk:
object_nodes = self.cli.object.nodes(
rpc_endpoint=rpc_endpoint,
cid=cid,
address=address,
bearer=bearer,
generate_key=generate_key,
oid=oid,
trace=trace,
root=root,
verify_presence_all=verify_presence_all,
json=json,
ttl=ttl,
xhdr=xhdr,
timeout=timeout,
)
return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[-1]
@reporter.step("Get first data chunk")
def get_first_data(
self,
rpc_endpoint: str,
cid: str,
oid: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = True,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> Chunk:
object_nodes = self.cli.object.nodes(
rpc_endpoint=rpc_endpoint,
cid=cid,
address=address,
bearer=bearer,
generate_key=generate_key,
oid=oid,
trace=trace,
root=root,
verify_presence_all=verify_presence_all,
json=json,
ttl=ttl,
xhdr=xhdr,
timeout=timeout,
)
return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0]
def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]:
parse_result = json.loads(object_nodes)
if parse_result.get("errors"):
raise RuntimeError(parse_result["errors"])
return [Chunk(**chunk) for chunk in parse_result["data_objects"]]
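For reference, a sketch of chaining these chunk helpers; cli, cid, oid, cluster and the endpoint are assumed to come from the surrounding test:

chunks_ops = ChunksOperations(cli)
chunks = chunks_ops.get_all(rpc_endpoint="localhost:8080", cid=cid, oid=oid)
first_data = chunks_ops.get_first_data(rpc_endpoint="localhost:8080", cid=cid, oid=oid)
# Locate the cluster node (and its netmap entry) holding that chunk,
# then find nodes that hold no chunks of the object at all.
node, netmap_info = chunks_ops.get_chunk_node(cluster, first_data)
free_nodes = chunks_ops.search_node_without_chunks(chunks, cluster)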

View file

@@ -1,330 +0,0 @@
import json
import logging
import re
from typing import List, Optional, Union
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.grpc_operations import interfaces
from frostfs_testlib.utils import json_utils
logger = logging.getLogger("NeoLogger")
class ContainerOperations(interfaces.ContainerInterface):
def __init__(self, cli: FrostfsCli) -> None:
self.cli = cli
@reporter.step("Create Container")
def create(
self,
endpoint: str,
nns_zone: Optional[str] = None,
nns_name: Optional[str] = None,
address: Optional[str] = None,
attributes: Optional[dict] = None,
basic_acl: Optional[str] = None,
await_mode: bool = False,
disable_timestamp: bool = False,
force: bool = False,
trace: bool = False,
name: Optional[str] = None,
nonce: Optional[str] = None,
policy: Optional[str] = None,
session: Optional[str] = None,
subnet: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
A wrapper for `frostfs-cli container create` call.
Args:
wallet (WalletInfo): a wallet on whose behalf a container is created
policy (optional, str): placement rule for container
basic_acl (optional, str): an ACL for container, will be
appended to `--basic-acl` key
attributes (optional, dict): container attributes , will be
appended to `--attributes` key
session_token (optional, str): a path to session token file
session_wallet(optional, str): a path to the wallet which signed
the session token; this parameter makes sense
when paired with `session_token`
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
options (optional, dict): any other options to pass to the call
name (optional, str): container name attribute
await_mode (bool): block execution until container is persisted
wait_for_creation (optional, bool): wait until the container appears in the container list
timeout: Timeout for the operation.
Returns:
(str): CID of the created container
"""
result = self.cli.container.create(
rpc_endpoint=endpoint,
policy=policy,
nns_zone=nns_zone,
nns_name=nns_name,
address=address,
attributes=attributes,
basic_acl=basic_acl,
await_mode=await_mode,
disable_timestamp=disable_timestamp,
force=force,
trace=trace,
name=name,
nonce=nonce,
session=session,
subnet=subnet,
ttl=ttl,
xhdr=xhdr,
timeout=timeout,
)
cid = self._parse_cid(result.stdout)
logger.info("Container created; waiting until it is persisted in the sidechain")
return cid
@reporter.step("List Containers")
def list(
self,
endpoint: str,
name: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
owner: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
**params,
) -> List[str]:
"""
A wrapper for `frostfs-cli container list` call. It returns all the
available containers for the given wallet.
Args:
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
timeout: Timeout for the operation.
Returns:
(list): list of containers
"""
result = self.cli.container.list(
rpc_endpoint=endpoint,
name=name,
address=address,
generate_key=generate_key,
owner=owner,
ttl=ttl,
xhdr=xhdr,
timeout=timeout,
**params,
)
return result.stdout.split()
@reporter.step("List Objects in container")
def list_objects(
self,
endpoint: str,
cid: str,
bearer: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> List[str]:
"""
A wrapper for `frostfs-cli container list-objects` call. It returns all the
available objects in container.
Args:
container_id: cid of container
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
timeout: Timeout for the operation.
Returns:
(list): list of containers
"""
result = self.cli.container.list_objects(
rpc_endpoint=endpoint,
cid=cid,
bearer=bearer,
wallet=wallet,
address=address,
generate_key=generate_key,
trace=trace,
ttl=ttl,
xhdr=xhdr,
timeout=timeout,
)
logger.info(f"Container objects: \n{result}")
return result.stdout.split()
@reporter.step("Delete container")
def delete(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
await_mode: bool = False,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
force: bool = False,
trace: bool = False,
):
try:
return self.cli.container.delete(
rpc_endpoint=endpoint,
cid=cid,
address=address,
await_mode=await_mode,
session=session,
ttl=ttl,
xhdr=xhdr,
force=force,
trace=trace,
).stdout
except RuntimeError as e:
print(f"Error request:\n{e}")
@reporter.step("Get container")
def get(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
to: Optional[str] = None,
json_mode: bool = True,
trace: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> Union[dict, str]:
result = self.cli.container.get(
rpc_endpoint=endpoint,
cid=cid,
address=address,
generate_key=generate_key,
await_mode=await_mode,
to=to,
json_mode=json_mode,
trace=trace,
ttl=ttl,
xhdr=xhdr,
timeout=timeout,
)
container_info = json.loads(result.stdout)
attributes = dict()
for attr in container_info["attributes"]:
attributes[attr["key"]] = attr["value"]
container_info["attributes"] = attributes
container_info["ownerID"] = json_utils.json_reencode(container_info["ownerID"]["value"])
return container_info
@reporter.step("Get eacl container")
def get_eacl(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
json_mode: bool = True,
trace: bool = False,
to: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
return self.cli.container.get_eacl(
rpc_endpoint=endpoint,
cid=cid,
address=address,
generate_key=generate_key,
await_mode=await_mode,
to=to,
session=session,
ttl=ttl,
xhdr=xhdr,
timeout=timeout,
).stdout
@reporter.step("Get nodes container")
def nodes(
self,
endpoint: str,
cid: str,
cluster: Cluster,
address: Optional[str] = None,
ttl: Optional[int] = None,
from_file: Optional[str] = None,
trace: bool = False,
short: Optional[bool] = True,
xhdr: Optional[dict] = None,
generate_key: Optional[bool] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> List[ClusterNode]:
result = self.cli.container.search_node(
rpc_endpoint=endpoint,
cid=cid,
address=address,
ttl=ttl,
from_file=from_file,
trace=trace,
short=short,
xhdr=xhdr,
generate_key=generate_key,
timeout=timeout,
).stdout
pattern = r"[0-9]+(?:\.[0-9]+){3}"
nodes_ip = list(set(re.findall(pattern, result)))
with reporter.step(f"nodes ips = {nodes_ip}"):
nodes_list = cluster.get_nodes_by_ip(nodes_ip)
with reporter.step(f"Return nodes - {nodes_list}"):
return nodes_list
@reporter.step("Resolve container by name")
def resolve_container_by_name(self, name: str, node: ClusterNode):
resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", node.host.config.product)
resolver: BucketContainerResolver = resolver_cls()
return resolver.resolve(node, name)
def _parse_cid(self, output: str) -> str:
"""
Parses container ID from a given CLI output. The input string we expect:
container ID: 2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN
awaiting...
container has been persisted on sidechain
We want to take 'container ID' value from the string.
Args:
output (str): CLI output to parse
Returns:
(str): extracted CID
"""
try:
# taking first line from command's output
first_line = output.split("\n")[0]
except Exception:
first_line = ""
logger.error(f"Got empty output: {output}")
splitted = first_line.split(": ")
if len(splitted) != 2:
raise ValueError(f"no CID was parsed from command output: \t{first_line}")
return splitted[1]
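A usage sketch for the container facade above; the endpoint, placement policy and cluster fixture are illustrative:

containers = ContainerOperations(cli)
cid = containers.create(endpoint="localhost:8080", policy="REP 2 IN X CBF 1 SELECT 2 FROM * AS X", await_mode=True)
oids = containers.list_objects(endpoint="localhost:8080", cid=cid)
owner_nodes = containers.nodes(endpoint="localhost:8080", cid=cid, cluster=cluster)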

View file

@@ -1,624 +0,0 @@
import json
import logging
import os
import re
import uuid
from typing import Any, Optional
from frostfs_testlib import reporter, utils
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.grpc_operations import interfaces
from frostfs_testlib.storage.grpc_operations.implementations.chunks import ChunksOperations
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import cli_utils, file_utils
logger = logging.getLogger("NeoLogger")
class ObjectOperations(interfaces.ObjectInterface):
def __init__(self, cli: FrostfsCli) -> None:
self.cli = cli
self.chunks: interfaces.ChunksInterface = ChunksOperations(self.cli)
@reporter.step("Delete object")
def delete(
self,
cid: str,
oid: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
DELETE an Object.
Args:
cid: ID of Container where we get the Object from
oid: ID of Object we are going to delete
bearer: path to Bearer Token file, appends to `--bearer` key
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
(str): Tombstone ID
"""
result = self.cli.object.delete(
rpc_endpoint=endpoint,
cid=cid,
oid=oid,
bearer=bearer,
xhdr=xhdr,
session=session,
timeout=timeout,
)
id_str = result.stdout.split("\n")[1]
tombstone = id_str.split(":")[1]
return tombstone.strip()
@reporter.step("Get object")
def get(
self,
cid: str,
oid: str,
endpoint: str,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> file_utils.TestFile:
"""
GET from FrostFS.
Args:
cid (str): ID of Container where we get the Object from
oid (str): Object ID
bearer: path to Bearer Token file, appends to `--bearer` key
write_object: path to downloaded file, appends to `--file` key
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
no_progress(optional, bool): do not show progress bar
xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
(str): path to downloaded file
"""
if not write_object:
write_object = str(uuid.uuid4())
test_file = file_utils.TestFile(os.path.join(ASSETS_DIR, write_object))
self.cli.object.get(
rpc_endpoint=endpoint,
cid=cid,
oid=oid,
file=test_file,
bearer=bearer,
no_progress=no_progress,
xhdr=xhdr,
session=session,
timeout=timeout,
)
return test_file
@reporter.step("Get object from random node")
def get_from_random_node(
self,
cid: str,
oid: str,
cluster: Cluster,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
GET from FrostFS random storage node
Args:
cid: ID of Container where we get the Object from
oid: Object ID
cluster: cluster object
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
write_object (optional, str): path to downloaded file, appends to `--file` key
no_progress(optional, bool): do not show progress bar
xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
(str): path to downloaded file
"""
endpoint = cluster.get_random_storage_rpc_endpoint()
return self.get(
cid,
oid,
endpoint,
bearer,
write_object,
xhdr,
no_progress,
session,
timeout,
)
@reporter.step("Get hash object")
def hash(
self,
rpc_endpoint: str,
cid: str,
oid: str,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
range: Optional[str] = None,
salt: Optional[str] = None,
ttl: Optional[int] = None,
session: Optional[str] = None,
hash_type: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
Get object hash.
Args:
address: Address of wallet account.
bearer: File with signed JSON or binary encoded bearer token.
cid: Container ID.
generate_key: Generate new private key.
oid: Object ID.
range: Range to take hash from in the form offset1:length1,...
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
salt: Salt in hex format.
ttl: TTL value in request meta header (default 2).
session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session.
hash_type: Hash type. Either 'sha256' or 'tz' (default "sha256").
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation (default 15s).
Returns:
Command's result.
"""
result = self.cli.object.hash(
rpc_endpoint=rpc_endpoint,
cid=cid,
oid=oid,
address=address,
bearer=bearer,
generate_key=generate_key,
range=range,
salt=salt,
ttl=ttl,
xhdr=xhdr,
session=session,
hash_type=hash_type,
timeout=timeout,
)
return result.stdout
@reporter.step("Head object")
def head(
self,
cid: str,
oid: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
json_output: bool = True,
is_raw: bool = False,
is_direct: bool = False,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> CommandResult | Any:
"""
HEAD an Object.
Args:
cid (str): ID of Container where we get the Object from
oid (str): ObjectID to HEAD
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
endpoint(optional, str): FrostFS endpoint to send request to
json_output(optional, bool): return response in JSON format or not; this flag
turns into `--json` key
is_raw(optional, bool): send "raw" request or not; this flag
turns into `--raw` key
is_direct(optional, bool): send request directly to the node or not; this flag
turns into `--ttl 1` key
xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
depending on the `json_output` parameter value, the function returns
(dict): HEAD response in JSON format
or
(str): HEAD response as a plain text
"""
result = self.cli.object.head(
rpc_endpoint=endpoint,
cid=cid,
oid=oid,
bearer=bearer,
json_mode=json_output,
raw=is_raw,
ttl=1 if is_direct else None,
xhdr=xhdr,
session=session,
timeout=timeout,
)
if not json_output:
return result
try:
decoded = json.loads(result.stdout)
except Exception as exc:
# If we failed to parse output as JSON, the cause might be
# the plain text string in the beginning of the output.
# Here we cut off first string and try to parse again.
logger.info(f"failed to parse output: {exc}")
logger.info("parsing output in another way")
fst_line_idx = result.stdout.find("\n")
decoded = json.loads(result.stdout[fst_line_idx:])
# If response is an EC object header, it contains the "chunks" key
if "chunks" in decoded.keys():
logger.info("decoding ec chunks")
return decoded["chunks"]
# If response is Complex Object header, it has `splitId` key
if "splitId" in decoded.keys():
logger.info("decoding split header")
return utils.json_utils.decode_split_header(decoded)
# If response is Last or Linking Object header,
# it has `header` dictionary and non-null `split` dictionary
if "split" in decoded["header"].keys():
if decoded["header"]["split"]:
logger.info("decoding linking object")
return utils.json_utils.decode_linking_object(decoded)
if decoded["header"]["objectType"] == "STORAGE_GROUP":
logger.info("decoding storage group")
return utils.json_utils.decode_storage_group(decoded)
if decoded["header"]["objectType"] == "TOMBSTONE":
logger.info("decoding tombstone")
return utils.json_utils.decode_tombstone(decoded)
logger.info("decoding simple header")
return utils.json_utils.decode_simple_header(decoded)
@reporter.step("Lock Object")
def lock(
self,
cid: str,
oid: str,
endpoint: str,
lifetime: Optional[int] = None,
expire_at: Optional[int] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
Locks object in container.
Args:
address: Address of wallet account.
bearer: File with signed JSON or binary encoded bearer token.
cid: Container ID.
oid: Object ID.
lifetime: Lock lifetime.
expire_at: Lock expiration epoch.
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
session: Path to a JSON-encoded container session token.
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation.
Returns:
Lock object ID
"""
result = self.cli.object.lock(
rpc_endpoint=endpoint,
lifetime=lifetime,
expire_at=expire_at,
address=address,
cid=cid,
oid=oid,
bearer=bearer,
xhdr=xhdr,
session=session,
ttl=ttl,
timeout=timeout,
)
# Splitting CLI output to separate lines and taking the first line
id_str = result.stdout.strip().split("\n")[0]
oid = id_str.split(":")[1]
return oid.strip()
@reporter.step("Put object")
def put(
self,
path: str,
cid: str,
endpoint: str,
bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
PUT of given file.
Args:
path: path to file to be PUT
cid: ID of Container where we get the Object from
bearer: path to Bearer Token file, appends to `--bearer` key
copies_number: Number of copies of the object to store within the RPC call
attributes: User attributes in form of Key1=Value1,Key2=Value2
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
no_progress: do not show progress bar
expire_at: Last epoch in the life of the object
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
(str): ID of uploaded Object
"""
result = self.cli.object.put(
rpc_endpoint=endpoint,
file=path,
cid=cid,
attributes=attributes,
bearer=bearer,
copies_number=copies_number,
expire_at=expire_at,
no_progress=no_progress,
xhdr=xhdr,
session=session,
timeout=timeout,
)
# Splitting CLI output to separate lines and taking the penultimate line
id_str = result.stdout.strip().split("\n")[-2]
oid = id_str.split(":")[1]
return oid.strip()
@reporter.step("Put object to random node")
def put_to_random_node(
self,
path: str,
cid: str,
cluster: Cluster,
bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
PUT of given file to a random storage node.
Args:
path: path to file to be PUT
cid: ID of Container where we get the Object from
cluster: cluster under test
bearer: path to Bearer Token file, appends to `--bearer` key
copies_number: Number of copies of the object to store within the RPC call
attributes: User attributes in form of Key1=Value1,Key2=Value2
no_progress: do not show progress bar
expire_at: Last epoch in the life of the object
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
ID of uploaded Object
"""
endpoint = cluster.get_random_storage_rpc_endpoint()
return self.put(
path,
cid,
endpoint,
bearer,
copies_number,
attributes,
xhdr,
expire_at,
no_progress,
session,
timeout=timeout,
)
@reporter.step("Get Range")
def range(
self,
cid: str,
oid: str,
range_cut: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> tuple[file_utils.TestFile, bytes]:
"""
GETRANGE an Object.
Args:
wallet: wallet on whose behalf GETRANGE is done
cid: ID of Container where we get the Object from
oid: ID of Object we are going to request
range_cut: range to take data from in the form offset:length
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
bearer: path to Bearer Token file, appends to `--bearer` key
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
(str, bytes) - path to the file with range content and content of this file as bytes
"""
test_file = file_utils.TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4())))
self.cli.object.range(
rpc_endpoint=endpoint,
cid=cid,
oid=oid,
range=range_cut,
file=test_file,
bearer=bearer,
xhdr=xhdr,
session=session,
timeout=timeout,
)
with open(test_file, "rb") as file:
content = file.read()
return test_file, content
@reporter.step("Search object")
def search(
self,
cid: str,
endpoint: str,
bearer: str = "",
oid: Optional[str] = None,
filters: Optional[dict] = None,
expected_objects_list: Optional[list] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
phy: bool = False,
root: bool = False,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
ttl: Optional[int] = None,
) -> list:
"""
SEARCH an Object.
Args:
wallet: wallet on whose behalf SEARCH is done
cid: ID of Container where we get the Object from
shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
filters: key=value pairs to filter Objects
expected_objects_list: a list of ObjectIDs to compare found Objects with
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
phy: Search physically stored objects.
root: Search for user objects.
timeout: Timeout for the operation.
Returns:
list of found ObjectIDs
"""
result = self.cli.object.search(
rpc_endpoint=endpoint,
cid=cid,
bearer=bearer,
oid=oid,
xhdr=xhdr,
filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None,
session=session,
phy=phy,
root=root,
address=address,
generate_key=generate_key,
ttl=ttl,
timeout=timeout,
)
found_objects = re.findall(r"(\w{43,44})", result.stdout)
if expected_objects_list:
if sorted(found_objects) == sorted(expected_objects_list):
logger.info(f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'")
else:
logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'")
return found_objects
@wait_for_success()
@reporter.step("Search object nodes")
def nodes(
self,
cluster: Cluster,
cid: str,
oid: str,
alive_node: ClusterNode,
bearer: str = "",
xhdr: Optional[dict] = None,
is_direct: bool = False,
verify_presence_all: bool = False,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list[ClusterNode]:
endpoint = alive_node.storage_node.get_rpc_endpoint()
response = self.cli.object.nodes(
rpc_endpoint=endpoint,
cid=cid,
oid=oid,
bearer=bearer,
ttl=1 if is_direct else None,
json=True,
xhdr=xhdr,
timeout=timeout,
verify_presence_all=verify_presence_all,
)
response_json = json.loads(response.stdout)
# Currently, the command will show expected and confirmed nodes.
# And we (currently) count only nodes which are both expected and confirmed
object_nodes_id = {
required_node
for data_object in response_json["data_objects"]
for required_node in data_object["required_nodes"]
if required_node in data_object["confirmed_nodes"]
}
netmap_nodes_list = cli_utils.parse_netmap_output(
self.cli.netmap.snapshot(
rpc_endpoint=endpoint,
).stdout
)
netmap_nodes = [
netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id
]
object_nodes = [
cluster_node
for netmap_node in netmap_nodes
for cluster_node in cluster.cluster_nodes
if netmap_node.node == cluster_node.host_ip
]
return object_nodes
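A put/get/delete round-trip sketch over the object facade; cli, cid, the file path and the endpoint are placeholders:

objects = ObjectOperations(cli)
oid = objects.put(path="/tmp/payload.bin", cid=cid, endpoint="localhost:8080")
downloaded = objects.get(cid=cid, oid=oid, endpoint="localhost:8080")  # returns a TestFile path
tombstone = objects.delete(cid=cid, oid=oid, endpoint="localhost:8080")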

View file

@@ -1,392 +0,0 @@
from abc import ABC, abstractmethod
from typing import Any, List, Optional
from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.constants import PlacementRule
from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo
from frostfs_testlib.utils import file_utils
class ChunksInterface(ABC):
@abstractmethod
def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]:
pass
@abstractmethod
def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]:
pass
@abstractmethod
def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str:
pass
@abstractmethod
def get_all(
self,
rpc_endpoint: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> list[Chunk]:
pass
@abstractmethod
def get_parity(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> Chunk:
pass
@abstractmethod
def get_first_data(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> Chunk:
pass
class ObjectInterface(ABC):
def __init__(self) -> None:
self.chunks: ChunksInterface
@abstractmethod
def delete(
self,
cid: str,
oid: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def get(
self,
cid: str,
oid: str,
endpoint: str,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> file_utils.TestFile:
pass
@abstractmethod
def get_from_random_node(
self,
cid: str,
oid: str,
cluster: Cluster,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def hash(
self,
endpoint: str,
cid: str,
oid: str,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
range: Optional[str] = None,
salt: Optional[str] = None,
ttl: Optional[int] = None,
session: Optional[str] = None,
hash_type: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def head(
self,
cid: str,
oid: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
json_output: bool = True,
is_raw: bool = False,
is_direct: bool = False,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult | Any:
pass
@abstractmethod
def lock(
self,
cid: str,
oid: str,
endpoint: str,
lifetime: Optional[int] = None,
expire_at: Optional[int] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def put(
self,
path: str,
cid: str,
endpoint: str,
bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def put_to_random_node(
self,
path: str,
cid: str,
cluster: Cluster,
bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def range(
self,
cid: str,
oid: str,
range_cut: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> tuple[file_utils.TestFile, bytes]:
pass
@abstractmethod
def search(
self,
cid: str,
endpoint: str,
bearer: str = "",
oid: Optional[str] = None,
filters: Optional[dict] = None,
expected_objects_list: Optional[list] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
phy: bool = False,
root: bool = False,
timeout: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
ttl: Optional[int] = None,
) -> List:
pass
@abstractmethod
def nodes(
self,
cluster: Cluster,
cid: str,
oid: str,
alive_node: ClusterNode,
bearer: str = "",
xhdr: Optional[dict] = None,
is_direct: bool = False,
verify_presence_all: bool = False,
timeout: Optional[str] = None,
) -> List[ClusterNode]:
pass
class ContainerInterface(ABC):
@abstractmethod
def create(
self,
endpoint: str,
nns_zone: Optional[str] = None,
nns_name: Optional[str] = None,
address: Optional[str] = None,
attributes: Optional[dict] = None,
basic_acl: Optional[str] = None,
await_mode: bool = False,
disable_timestamp: bool = False,
force: bool = False,
trace: bool = False,
name: Optional[str] = None,
nonce: Optional[str] = None,
policy: Optional[str] = None,
session: Optional[str] = None,
subnet: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
"""
Create a new container and register it in the FrostFS.
It will be stored in the sidechain when the Inner Ring accepts it.
"""
raise NotImplementedError("No implemethed method create")
@abstractmethod
def delete(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
await_mode: bool = False,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
force: bool = False,
trace: bool = False,
) -> List[str]:
"""
Delete an existing container.
Only the owner of the container has permission to remove the container.
"""
raise NotImplementedError("No implemethed method delete")
@abstractmethod
def get(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
to: Optional[str] = None,
json_mode: bool = True,
trace: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[str]:
"""Get container field info."""
raise NotImplementedError("No implemethed method get")
@abstractmethod
def get_eacl(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
json_mode: bool = True,
trace: bool = False,
to: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[str]:
"""Get extended ACL table of container."""
raise NotImplementedError("No implemethed method get-eacl")
@abstractmethod
def list(
self,
endpoint: str,
name: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
owner: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
**params,
) -> List[str]:
"""List all created containers."""
raise NotImplementedError("No implemethed method list")
@abstractmethod
def nodes(
self,
endpoint: str,
cid: str,
cluster: Cluster,
address: Optional[str] = None,
ttl: Optional[int] = None,
from_file: Optional[str] = None,
trace: bool = False,
short: Optional[bool] = True,
xhdr: Optional[dict] = None,
generate_key: Optional[bool] = None,
timeout: Optional[str] = None,
) -> List[ClusterNode]:
"""Show the nodes participating in the container in the current epoch."""
raise NotImplementedError("No implemethed method nodes")
class GrpcClientWrapper(ABC):
def __init__(self) -> None:
self.object: ObjectInterface
self.container: ContainerInterface
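Any concrete client only has to populate the two facades declared by GrpcClientWrapper; a minimal sketch with an invented class name:

class InMemoryClientWrapper(GrpcClientWrapper):
    def __init__(self, object_impl: ObjectInterface, container_impl: ContainerInterface) -> None:
        # Satisfies the abstract wrapper contract by injecting concrete facades.
        self.object = object_impl
        self.container = container_impl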

View file

@@ -32,7 +32,7 @@ class ClusterTestBase:
    ):
        epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node)
        if wait_block:
-            self.wait_for_blocks(wait_block)
+            time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * wait_block)

    def wait_for_epochs_align(self):
        epoch.wait_for_epochs_align(self.shell, self.cluster)

@@ -42,6 +42,3 @@ class ClusterTestBase:
    def ensure_fresh_epoch(self):
        return epoch.ensure_fresh_epoch(self.shell, self.cluster)

-    def wait_for_blocks(self, blocks_count: int = 1):
-        time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * blocks_count)
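The removed helper is plain arithmetic over the morph block time; a sketch, assuming parse_time returns seconds, an import path matching the usage above, and an illustrative MORPH_BLOCK_TIME of "1s":

import time
from frostfs_testlib.utils import datetime_utils

time.sleep(datetime_utils.parse_time("1s") * 3)  # waits for ~3 blocks, i.e. ~3 seconds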

View file

@@ -1,21 +1,7 @@
import itertools
from concurrent.futures import Future, ThreadPoolExecutor
-from contextlib import contextmanager
from typing import Callable, Collection, Optional, Union

-MAX_WORKERS = 50

-@contextmanager
-def parallel_workers_limit(workers_count: int):
-    global MAX_WORKERS
-    original_value = MAX_WORKERS
-    MAX_WORKERS = workers_count
-    try:
-        yield
-    finally:
-        MAX_WORKERS = original_value

def parallel(
    fn: Union[Callable, list[Callable]],

@@ -68,7 +54,7 @@ def _run_by_fn_list(fn_list: list[Callable], *args, **kwargs) -> list[Future]:
    futures: list[Future] = []

-    with ThreadPoolExecutor(max_workers=min(len(fn_list), MAX_WORKERS)) as executor:
+    with ThreadPoolExecutor(max_workers=len(fn_list)) as executor:
        for fn in fn_list:
            task_args = _get_args(*args)
            task_kwargs = _get_kwargs(**kwargs)

@@ -81,7 +67,7 @@ def _run_by_fn_list(fn_list: list[Callable], *args, **kwargs) -> list[Future]:
def _run_by_items(fn: Callable, parallel_items: Collection, *args, **kwargs) -> list[Future]:
    futures: list[Future] = []

-    with ThreadPoolExecutor(max_workers=min(len(parallel_items), MAX_WORKERS)) as executor:
+    with ThreadPoolExecutor(max_workers=len(parallel_items)) as executor:
        for item in parallel_items:
            task_args = _get_args(*args)
            task_kwargs = _get_kwargs(**kwargs)
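A sketch of the removed limiter in use; the worker count, workload and the parallel_items keyword (inferred from _run_by_items above) are assumptions:

def probe(item: int) -> int:
    return item * 2  # placeholder workload

# Cap the shared worker pool for this batch only; MAX_WORKERS reverts afterwards.
with parallel_workers_limit(4):
    futures = parallel(probe, parallel_items=range(100))
results = [future.result() for future in futures]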

View file

@@ -15,7 +15,7 @@ from contextlib import suppress
 from datetime import datetime
 from io import StringIO
 from textwrap import shorten
-from typing import Dict, List, Optional, TypedDict, Union
+from typing import Dict, List, TypedDict, Union

 import pexpect
@@ -75,22 +75,14 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date
     reporter.attach(command_attachment, "Command execution")


-def log_command_execution(url: str, cmd: str, output: Union[str, dict], params: Optional[dict] = None) -> None:
+def log_command_execution(cmd: str, output: Union[str, TypedDict]) -> None:
     logger.info(f"{cmd}: {output}")

     with suppress(Exception):
         json_output = json.dumps(output, indent=4, sort_keys=True)
         output = json_output

-    try:
-        json_params = json.dumps(params, indent=4, sort_keys=True)
-    except TypeError as err:
-        logger.warning(f"Failed to serialize '{cmd}' request parameters:\n{params}\nException: {err}")
-    else:
-        params = json_params
-
-    command_attachment = f"COMMAND: '{cmd}'\n" f"URL: {url}\n" f"PARAMS:\n{params}\n" f"OUTPUT:\n{output}\n"
-    reporter.attach(command_attachment, "Command execution")
+    command_attachment = f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n"
+    with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'):
+        reporter.attach(command_attachment, "Command execution")


 def parse_netmap_output(output: str) -> list[NodeNetmapInfo]:
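For callers, the signature change looks roughly like this; the URL and params values are illustrative:

# Branch side: request context is attached alongside the output.
log_command_execution(
    url="http://gate.example:8084",  # illustrative
    cmd="PUT /bucket/object",
    output={"status": "OK"},
    params={"acl": "private"},
)

# Master side: only the command and its output are attached,
# wrapped in a reporter step titled with the shortened command.
log_command_execution("PUT /bucket/object", {"status": "OK"})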

View file

@@ -6,46 +6,11 @@ from typing import Any, Optional

 from frostfs_testlib import reporter
 from frostfs_testlib.resources.common import ASSETS_DIR
-from frostfs_testlib.utils import string_utils

 logger = logging.getLogger("NeoLogger")


-class TestFile(os.PathLike):
-    def __init__(self, path: str):
-        self.path = path
-
-    def __del__(self):
-        logger.debug(f"Removing file {self.path}")
-        if os.path.exists(self.path):
-            os.remove(self.path)
-
-    def __str__(self):
-        return self.path
-
-    def __repr__(self):
-        return self.path
-
-    def __fspath__(self):
-        return self.path
-
-
-def ensure_directory(path):
-    directory = os.path.dirname(path)
-    if not os.path.exists(directory):
-        os.makedirs(directory)
-
-
-def ensure_directory_opener(path, flags):
-    ensure_directory(path)
-    return os.open(path, flags)
-
-
-# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps
-# Use object_size dt in future as argument
-@reporter.step("Generate file")
-def generate_file(size: int) -> TestFile:
+def generate_file(size: int) -> str:
     """Generates a binary file with the specified size in bytes.

     Args:
@@ -54,22 +19,19 @@ def generate_file(size: int) -> TestFile:
     Returns:
         The path to the generated file.
     """
-    test_file = TestFile(os.path.join(ASSETS_DIR, string_utils.unique_name("object-")))
-    with open(test_file, "wb", opener=ensure_directory_opener) as file:
+    file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4()))
+    with open(file_path, "wb") as file:
         file.write(os.urandom(size))
-    logger.info(f"File with size {size} bytes has been generated: {test_file}")
+    logger.info(f"File with size {size} bytes has been generated: {file_path}")

-    return test_file
+    return file_path


-# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps
-# Use object_size dt in future as argument
-@reporter.step("Generate file with content")
 def generate_file_with_content(
     size: int,
-    file_path: Optional[str | TestFile] = None,
+    file_path: Optional[str] = None,
     content: Optional[str] = None,
-) -> TestFile:
+) -> str:
     """Creates a new file with specified content.

     Args:
@@ -86,22 +48,20 @@ def generate_file_with_content(
         content = os.urandom(size)
         mode = "wb"

-    test_file = None
     if not file_path:
-        test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())))
-    elif isinstance(file_path, TestFile):
-        test_file = file_path
+        file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
     else:
-        test_file = TestFile(file_path)
+        if not os.path.exists(os.path.dirname(file_path)):
+            os.makedirs(os.path.dirname(file_path))

-    with open(test_file, mode, opener=ensure_directory_opener) as file:
+    with open(file_path, mode) as file:
         file.write(content)

-    return test_file
+    return file_path


 @reporter.step("Get File Hash")
-def get_file_hash(file_path: str | TestFile, len: Optional[int] = None, offset: Optional[int] = None) -> str:
+def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str:
     """Generates hash for the specified file.

     Args:
@@ -128,7 +88,7 @@ def get_file_hash(file_path: str | TestFile, len: Optional[int] = None, offset:


 @reporter.step("Concatenation set of files to one file")
-def concat_files(file_paths: list[str | TestFile], resulting_file_path: Optional[str | TestFile] = None) -> TestFile:
+def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str:
     """Concatenates several files into a single file.

     Args:
@@ -138,24 +98,16 @@ def concat_files(file_paths: list[str | TestFile], resulting_file_path: Optional
     Returns:
         Path to the resulting file.
     """
-    test_file = None
     if not resulting_file_path:
-        test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())))
-    elif isinstance(resulting_file_path, TestFile):
-        test_file = resulting_file_path
-    else:
-        test_file = TestFile(resulting_file_path)
-
-    with open(test_file, "wb", opener=ensure_directory_opener) as f:
+        resulting_file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
+    with open(resulting_file_path, "wb") as f:
         for file in file_paths:
             with open(file, "rb") as part_file:
                 f.write(part_file.read())

-    return test_file
+    return resulting_file_path


-@reporter.step("Split file to {parts} parts")
-def split_file(file_path: str | TestFile, parts: int) -> list[TestFile]:
+def split_file(file_path: str, parts: int) -> list[str]:
     """Splits specified file into several specified number of parts.

     Each part is saved under name `{original_file}_part_{i}`.
@@ -177,7 +129,7 @@ def split_file(file_path: str | TestFile, parts: int) -> list[TestFile]:
     part_file_paths = []
     for content_offset in range(0, content_size + 1, chunk_size):
         part_file_name = f"{file_path}_part_{part_id}"
-        part_file_paths.append(TestFile(part_file_name))
+        part_file_paths.append(part_file_name)
         with open(part_file_name, "wb") as out_file:
             out_file.write(content[content_offset : content_offset + chunk_size])
         part_id += 1
@@ -185,8 +137,9 @@ def split_file(file_path: str | TestFile, parts: int) -> list[TestFile]:
     return part_file_paths


-@reporter.step("Get file content")
-def get_file_content(file_path: str | TestFile, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None) -> Any:
+def get_file_content(
+    file_path: str, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None
+) -> Any:
     """Returns content of specified file.

     Args:
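The practical difference on the branch side is lifecycle: generate_file returns a TestFile, which is os.PathLike and deletes its backing file when the object is finalized, so tests need no explicit cleanup. A sketch:

test_file = generate_file(size=1024)  # TestFile on the branch, plain str on master
with open(test_file, "rb") as f:      # os.PathLike: works wherever a path does
    data = f.read()
print(get_file_hash(test_file))
del test_file  # __del__ unlinks the file; in CPython this is usually
               # immediate, but garbage-collection timing is not guaranteed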

View file

@@ -1,28 +1,11 @@
-import itertools
 import random
 import re
 import string
-from datetime import datetime

 ONLY_ASCII_LETTERS = string.ascii_letters
 DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits
 NON_DIGITS_AND_LETTERS = string.punctuation

-FUSE = itertools.cycle(range(5))
-
-
-def unique_name(prefix: str = "", postfix: str = ""):
-    """
-    Generate unique short name of anything with prefix.
-    This should be unique in scope of multiple runs
-
-    Args:
-        prefix: prefix for unique name generation
-    Returns:
-        unique name string
-    """
-    return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}{next(FUSE)}{postfix}"
-

 def random_string(length: int = 5, source: str = ONLY_ASCII_LETTERS):
     """

View file

@@ -1,6 +1,5 @@
 import logging
 import re
-from functools import lru_cache

 from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
@@ -18,14 +17,14 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]:

     for binary in [NEOGO_EXECUTABLE, FROSTFS_AUTHMATE_EXEC]:
         out = shell.exec(f"{binary} --version").stdout
-        versions[binary] = parse_version(out)
+        versions[binary] = _parse_version(out)

     frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC)
-    versions[FROSTFS_CLI_EXEC] = parse_version(frostfs_cli.version.get().stdout)
+    versions[FROSTFS_CLI_EXEC] = _parse_version(frostfs_cli.version.get().stdout)

     try:
         frostfs_adm = FrostfsAdm(shell, FROSTFS_ADM_EXEC)
-        versions[FROSTFS_ADM_EXEC] = parse_version(frostfs_adm.version.get().stdout)
+        versions[FROSTFS_ADM_EXEC] = _parse_version(frostfs_adm.version.get().stdout)
     except RuntimeError:
         logger.info(f"{FROSTFS_ADM_EXEC} not installed")

@@ -37,54 +36,80 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]:
     return versions


-@reporter.step("Collect binaries versions from host")
 def parallel_binary_verions(host: Host) -> dict[str, str]:
     versions_by_host = {}

-    binary_path_by_name = {
-        **{
-            svc.name[:-3]: {
-                "exec_path": svc.attributes.get("exec_path"),
-                "param": svc.attributes.get("custom_version_parameter", "--version"),
-            }
-            for svc in host.config.services
-            if svc.attributes.get("exec_path") and svc.attributes.get("requires_version_check", "true") == "true"
-        },
-        **{
-            cli.name: {"exec_path": cli.exec_path, "param": cli.attributes.get("custom_version_parameter", "--version")}
-            for cli in host.config.clis
-            if cli.attributes.get("requires_version_check", "true") == "true"
-        },
-    }
+    binary_path_by_name = {}  # Maps binary name to executable path
+    for service_config in host.config.services:
+        exec_path = service_config.attributes.get("exec_path")
+        requires_check = service_config.attributes.get("requires_version_check", "true")
+        if exec_path:
+            binary_path_by_name[service_config.name] = {
+                "exec_path": exec_path,
+                "check": requires_check.lower() == "true",
+            }
+    for cli_config in host.config.clis:
+        requires_check = cli_config.attributes.get("requires_version_check", "true")
+        binary_path_by_name[cli_config.name] = {
+            "exec_path": cli_config.exec_path,
+            "check": requires_check.lower() == "true",
+        }

     shell = host.get_shell()
     versions_at_host = {}
     for binary_name, binary in binary_path_by_name.items():
-        binary_path = binary["exec_path"]
         try:
-            result = shell.exec(f"{binary_path} {binary['param']}")
-            version = parse_version(result.stdout) or parse_version(result.stderr) or "Unknown"
-            versions_at_host[binary_name] = version
+            binary_path = binary["exec_path"]
+            result = shell.exec(f"{binary_path} --version")
+            versions_at_host[binary_name] = {"version": _parse_version(result.stdout), "check": binary["check"]}
         except Exception as exc:
             logger.error(f"Cannot get version for {binary_path} because of\n{exc}")
-            versions_at_host[binary_name] = "Unknown"
+            versions_at_host[binary_name] = {"version": "Unknown", "check": binary["check"]}
     versions_by_host[host.config.address] = versions_at_host
     return versions_by_host


-@lru_cache
-def get_remote_binaries_versions(hosting: Hosting) -> dict[str, dict[str, str]]:
-    versions_by_host: dict[str, dict[str, str]] = {}
+@reporter.step("Get remote binaries versions")
+def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]:
+    versions_by_host = {}

-    with reporter.step("Get remote binaries versions"):
-        future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts)
+    future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts)

     for future in future_binary_verions:
         versions_by_host.update(future.result())

-    return versions_by_host
+    # Consolidate versions across all hosts
+    check_versions = {}
+    exceptions = []
+    exception = set()
+    previous_host = None
+    versions = {}
+    captured_version = None
+    for host, binary_versions in versions_by_host.items():
+        for name, binary in binary_versions.items():
+            version = binary["version"]
+            if not check_versions.get(f"{name[:-2]}", None):
+                captured_version = check_versions.get(f"{name[:-2]}", {}).get(host, {}).get(captured_version)
+                check_versions[f"{name[:-2]}"] = {host: {version: name}}
+            else:
+                captured_version = list(check_versions.get(f"{name[:-2]}", {}).get(previous_host).keys())[0]
+                check_versions[f"{name[:-2]}"].update({host: {version: name}})
+
+            if captured_version and captured_version != version:
+                exception.add(name[:-2])
+
+            versions[name] = {"version": version, "check": binary["check"]}
+        previous_host = host
+    logger.info(
+        "Remote binaries versions:\n" + "\n".join([f"{key} ver: {value['version']}" for key, value in versions.items()])
+    )
+    if exception:
+        for i in exception:
+            for host in versions_by_host.keys():
+                for version, name in check_versions.get(i).get(host).items():
+                    exceptions.append(f"Binary {name} has inconsistent version {version} on host {host}")
+        exceptions.append("\n")
+    return versions, exceptions


-def parse_version(version_output: str) -> str:
-    version = re.search(r"(?<=version[:=])\s?[\"\']?v?(.+)", version_output, re.IGNORECASE)
-    return version.group(1).strip("\"'\n\t ") if version else version_output
+def _parse_version(version_output: str) -> str:
+    version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE)
+    return version.group(1).strip() if version else version_output
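The regex change is the interesting part: the branch-side pattern demands "version:" or "version=" directly before the value and strips quote characters, while the master-side pattern accepts any run of colons and whitespace. A quick comparison against a typical version line; the sample output string is illustrative:

import re

output = "frostfs-cli\nVersion: v0.38.4\n"  # illustrative CLI output

old = re.search(r"(?<=version[:=])\s?[\"\']?v?(.+)", output, re.IGNORECASE)
new = re.search(r"version[:\s]*v?(.+)", output, re.IGNORECASE)

assert old and old.group(1).strip("\"'\n\t ") == "0.38.4"
assert new and new.group(1).strip() == "0.38.4"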

View file

@@ -3,7 +3,14 @@ from typing import Any, get_args

 import pytest

-from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType, Preset, ReadFrom
+from frostfs_testlib.load.load_config import (
+    EndpointSelectionStrategy,
+    LoadParams,
+    LoadScenario,
+    LoadType,
+    Preset,
+    ReadFrom,
+)
 from frostfs_testlib.load.runners import DefaultRunner
 from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME
 from frostfs_testlib.storage.cluster import ClusterNode
@@ -92,7 +99,9 @@ class TestLoadConfig:
     def test_load_controller_string_representation(self, load_params: LoadParams):
         load_params.endpoint_selection_strategy = EndpointSelectionStrategy.ALL
         load_params.object_size = 512
-        background_load_controller = BackgroundLoadController("tmp", load_params, None, None, DefaultRunner(None))
+        background_load_controller = BackgroundLoadController(
+            "tmp", load_params, "wallet", None, None, DefaultRunner(None)
+        )
         expected = "grpc 512 KiB, writers=7, readers=7, deleters=8"
         assert f"{background_load_controller}" == expected
         assert repr(background_load_controller) == expected
@@ -132,7 +141,7 @@ class TestLoadConfig:
             "--out 'pregen_json'",
             "--workers '7'",
             "--containers '16'",
-            "--policy 'container_placement_policy' --policy 'container_placement_policy_2'",
+            "--policy 'container_placement_policy'",
             "--ignore-errors",
             "--sleep '19'",
             "--local",
@@ -164,7 +173,7 @@ class TestLoadConfig:
             "--out 'pregen_json'",
             "--workers '7'",
             "--containers '16'",
-            "--policy 'container_placement_policy' --policy 'container_placement_policy_2'",
+            "--policy 'container_placement_policy'",
             "--ignore-errors",
             "--sleep '19'",
             "--local",
@@ -205,7 +214,7 @@ class TestLoadConfig:
             "--out 'pregen_json'",
             "--workers '7'",
             "--buckets '13'",
-            "--location 's3_location' --location 's3_location_2'",
+            "--location 's3_location'",
             "--ignore-errors",
             "--sleep '19'",
             "--acl 'acl'",
@@ -239,7 +248,7 @@ class TestLoadConfig:
             "--out 'pregen_json'",
             "--workers '7'",
             "--buckets '13'",
-            "--location 's3_location' --location 's3_location_2'",
+            "--location 's3_location'",
             "--ignore-errors",
             "--sleep '19'",
             "--acl 'acl'",
@@ -279,7 +288,7 @@ class TestLoadConfig:
             "--out 'pregen_json'",
             "--workers '7'",
             "--buckets '13'",
-            "--location 's3_location' --location 's3_location_2'",
+            "--location 's3_location'",
             "--ignore-errors",
             "--sleep '19'",
             "--acl 'acl'",
@@ -320,7 +329,7 @@ class TestLoadConfig:
             "--out 'pregen_json'",
             "--workers '7'",
             "--containers '16'",
-            "--policy 'container_placement_policy' --policy 'container_placement_policy_2'",
+            "--policy 'container_placement_policy'",
             "--ignore-errors",
             "--sleep '19'",
             "--acl 'acl'",
@@ -353,13 +362,12 @@ class TestLoadConfig:
             "--out 'pregen_json'",
             "--workers '7'",
             "--containers '16'",
-            "--policy 'container_placement_policy' --policy 'container_placement_policy_2'",
+            "--policy 'container_placement_policy'",
             "--ignore-errors",
             "--sleep '19'",
             "--acl 'acl'",
         ]
         expected_env_vars = {
-            "CONFIG_DIR": "config_dir",
             "CONFIG_FILE": "config_file",
             "DURATION": 9,
             "WRITE_OBJ_SIZE": 11,
@@ -372,49 +380,12 @@ class TestLoadConfig:
             "DELETERS": 8,
             "READ_AGE": 8,
             "STREAMING": 9,
-            "MAX_TOTAL_SIZE_GB": 17,
             "PREGEN_JSON": "pregen_json",
         }

         self._check_preset_params(load_params, expected_preset_args)
         self._check_env_vars(load_params, expected_env_vars)

-    @pytest.mark.parametrize(
-        "input, value, params",
-        [
-            (["A C ", " B"], ["A C", "B"], [f"--policy 'A C' --policy 'B'"]),
-            (" A ", ["A"], ["--policy 'A'"]),
-            (" A , B ", ["A , B"], ["--policy 'A , B'"]),
-            ([" A", "B "], ["A", "B"], ["--policy 'A' --policy 'B'"]),
-            (None, None, []),
-        ],
-    )
-    def test_grpc_list_parsing_formatter(self, input, value, params):
-        load_params = LoadParams(LoadType.gRPC)
-        load_params.preset = Preset()
-        load_params.preset.container_placement_policy = input
-        assert load_params.preset.container_placement_policy == value
-
-        self._check_preset_params(load_params, params)
-
-    @pytest.mark.parametrize(
-        "input, value, params",
-        [
-            (["A C ", " B"], ["A C", "B"], [f"--location 'A C' --location 'B'"]),
-            (" A ", ["A"], ["--location 'A'"]),
-            (" A , B ", ["A , B"], ["--location 'A , B'"]),
-            ([" A", "B "], ["A", "B"], ["--location 'A' --location 'B'"]),
-            (None, None, []),
-        ],
-    )
-    def test_s3_list_parsing_formatter(self, input, value, params):
-        load_params = LoadParams(LoadType.S3)
-        load_params.preset = Preset()
-        load_params.preset.s3_location = input
-        assert load_params.preset.s3_location == value
-
-        self._check_preset_params(load_params, params)
-
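The removed tests above document the branch-side list contract: a scalar string is normalized to a one-element list, elements are trimmed, and each element becomes its own repeated flag. A compact sketch of that formatting rule, not the library implementation:

def format_flags(flag: str, values) -> str:
    # Normalize a scalar to a one-element list, trim, and repeat the flag.
    if values is None:
        return ""
    if isinstance(values, str):
        values = [values]
    return " ".join(f"--{flag} '{v.strip()}'" for v in values)

assert format_flags("policy", ["A C ", " B"]) == "--policy 'A C' --policy 'B'"
assert format_flags("location", " A ") == "--location 'A'"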
     @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True)
     def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams):
         expected_env_vars = {
@@ -621,7 +592,6 @@ class TestLoadConfig:
             "--acl ''",
         ]
         expected_env_vars = {
-            "CONFIG_DIR": "",
             "CONFIG_FILE": "",
             "DURATION": 0,
             "WRITE_OBJ_SIZE": 0,
@@ -629,7 +599,6 @@ class TestLoadConfig:
             "K6_OUT": "",
             "K6_MIN_ITERATION_DURATION": "",
             "K6_SETUP_TIMEOUT": "",
-            "MAX_TOTAL_SIZE_GB": 0,
             "WRITERS": 0,
             "READERS": 0,
             "DELETERS": 0,
@@ -720,7 +689,9 @@ class TestLoadConfig:
         value = getattr(dataclass, field.name)
         assert value is not None, f"{field.name} is not None"

-    def _get_filled_load_params(self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False) -> LoadParams:
+    def _get_filled_load_params(
+        self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False
+    ) -> LoadParams:
         load_type_map = {
             LoadScenario.S3: LoadType.S3,
             LoadScenario.S3_CAR: LoadType.S3,
@@ -737,12 +708,13 @@ class TestLoadConfig:
         meta_fields = self._get_meta_fields(load_params)
         for field in meta_fields:
-            if getattr(field.instance, field.field.name) is None and load_params.scenario in field.field.metadata["applicable_scenarios"]:
+            if (
+                getattr(field.instance, field.field.name) is None
+                and load_params.scenario in field.field.metadata["applicable_scenarios"]
+            ):
                 value_to_set_map = {
                     int: 0 if set_emtpy else len(field.field.name),
-                    float: 0 if set_emtpy else len(field.field.name),
                     str: "" if set_emtpy else field.field.name,
-                    list[str]: "" if set_emtpy else [field.field.name, f"{field.field.name}_2"],
                     bool: False if set_emtpy else True,
                 }
                 value_to_set = value_to_set_map[field.field_type]
@@ -755,7 +727,11 @@ class TestLoadConfig:
     def _get_meta_fields(self, instance):
         data_fields = fields(instance)

-        fields_with_data = [MetaTestField(field, self._get_actual_field_type(field), instance) for field in data_fields if field.metadata]
+        fields_with_data = [
+            MetaTestField(field, self._get_actual_field_type(field), instance)
+            for field in data_fields
+            if field.metadata
+        ]

         for field in data_fields:
             actual_field_type = self._get_actual_field_type(field)