[#133] Change reporter usage

Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
Andrey Berezin 2023-11-29 15:27:17 +03:00
parent 39a17f3634
commit dc6b0e407f
37 changed files with 478 additions and 678 deletions

View file

@@ -1,8 +1,8 @@
 from typing import Callable

+from frostfs_testlib import reporter
 from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
 from frostfs_testlib.healthcheck.interfaces import Healthcheck
-from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
 from frostfs_testlib.shell import CommandOptions
 from frostfs_testlib.steps.node_management import storage_node_healthcheck
@@ -10,8 +10,6 @@ from frostfs_testlib.storage.cluster import ClusterNode, ServiceClass
 from frostfs_testlib.testing.test_control import wait_for_success
 from frostfs_testlib.utils.failover_utils import check_services_status

-reporter = get_reporter()
-

 class BasicHealthcheck(Healthcheck):
     def _perform(self, cluster_node: ClusterNode, checks: dict[Callable, dict]):
@@ -23,36 +21,33 @@ class BasicHealthcheck(Healthcheck):
         assert not issues, "Issues found:\n" + "\n".join(issues)

-    @wait_for_success(900, 30)
+    @wait_for_success(900, 30, title="Wait for full healthcheck for {cluster_node}")
     def full_healthcheck(self, cluster_node: ClusterNode):
         checks = {
             self.storage_healthcheck: {},
             self._tree_healthcheck: {},
         }

-        with reporter.step(f"Perform full healthcheck for {cluster_node}"):
-            self._perform(cluster_node, checks)
+        self._perform(cluster_node, checks)

-    @wait_for_success(900, 30)
+    @wait_for_success(900, 30, title="Wait for startup healthcheck on {cluster_node}")
     def startup_healthcheck(self, cluster_node: ClusterNode):
         checks = {
             self.storage_healthcheck: {},
             self._tree_healthcheck: {},
         }

-        with reporter.step(f"Perform startup healthcheck on {cluster_node}"):
-            self._perform(cluster_node, checks)
+        self._perform(cluster_node, checks)

-    @wait_for_success(900, 30)
+    @wait_for_success(900, 30, title="Wait for storage healthcheck on {cluster_node}")
     def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None:
         checks = {
             self._storage_healthcheck: {},
         }

-        with reporter.step(f"Perform storage healthcheck on {cluster_node}"):
-            self._perform(cluster_node, checks)
+        self._perform(cluster_node, checks)

-    @wait_for_success(120, 5)
+    @wait_for_success(120, 5, title="Wait for service healthcheck on {cluster_node}")
     def services_healthcheck(self, cluster_node: ClusterNode):
         svcs_to_check = cluster_node.services
         checks = {
@@ -63,8 +58,7 @@ class BasicHealthcheck(Healthcheck):
             self._check_services: {"services": svcs_to_check},
         }

-        with reporter.step(f"Perform service healthcheck on {cluster_node}"):
-            self._perform(cluster_node, checks)
+        self._perform(cluster_node, checks)

     def _check_services(self, cluster_node: ClusterNode, services: list[ServiceClass]):
         for svc in services:
@@ -72,14 +66,14 @@ class BasicHealthcheck(Healthcheck):
             if result == False:
                 return f"Service {svc.get_service_systemctl_name()} healthcheck failed on node {cluster_node}."

-    @reporter.step_deco("Storage healthcheck on {cluster_node}")
+    @reporter.step("Storage healthcheck on {cluster_node}")
     def _storage_healthcheck(self, cluster_node: ClusterNode) -> str | None:
         result = storage_node_healthcheck(cluster_node.storage_node)
         self._gather_socket_info(cluster_node)
         if result.health_status != "READY" or result.network_status != "ONLINE":
             return f"Node {cluster_node} is not healthy. Health={result.health_status}. Network={result.network_status}"

-    @reporter.step_deco("Tree healthcheck on {cluster_node}")
+    @reporter.step("Tree healthcheck on {cluster_node}")
     def _tree_healthcheck(self, cluster_node: ClusterNode) -> str | None:
         host = cluster_node.host
         service_config = host.get_service_config(cluster_node.storage_node.name)
@@ -102,6 +96,6 @@ class BasicHealthcheck(Healthcheck):
             f"Error during tree healthcheck (rc={result.return_code}): {result.stdout}. \n Stderr: {result.stderr}"
         )

-    @reporter.step_deco("Gather socket info for {cluster_node}")
+    @reporter.step("Gather socket info for {cluster_node}")
     def _gather_socket_info(self, cluster_node: ClusterNode):
         cluster_node.host.get_shell().exec("ss -tuln | grep 8080", CommandOptions(check=False))
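
The same migration repeats across every file in this commit, so here is a minimal before/after sketch of the pattern (the function name and argument are illustrative, not taken from the library):

# Before: each module fetched its own reporter object and used step_deco.
from frostfs_testlib.reporter import get_reporter

reporter = get_reporter()

@reporter.step_deco("Check node {node_name}")
def check_node(node_name: str) -> None: ...

# After: the shared reporter module is imported directly, and reporter.step
# serves both as a decorator and as a context manager (both usages appear
# in the diffs below).
from frostfs_testlib import reporter

@reporter.step("Check node {node_name}")
def check_node(node_name: str) -> None:
    with reporter.step(f"Inspect {node_name} in more detail"):
        ...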

View file

@@ -8,10 +8,10 @@ from time import sleep
 from typing import Any
 from urllib.parse import urlparse

+from frostfs_testlib import reporter
 from frostfs_testlib.load.interfaces.loader import Loader
 from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType
 from frostfs_testlib.processes.remote_process import RemoteProcess
-from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.common import STORAGE_USER_NAME
 from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD
 from frostfs_testlib.shell import Shell
@@ -21,7 +21,6 @@ from frostfs_testlib.testing.test_control import wait_for_success

 EXIT_RESULT_CODE = 0
 logger = logging.getLogger("NeoLogger")
-reporter = get_reporter()


 @dataclass
@@ -102,7 +101,7 @@ class K6:
         self.preset_output = result.stdout.strip("\n")
         return self.preset_output

-    @reporter.step_deco("Generate K6 command")
+    @reporter.step("Generate K6 command")
     def _generate_env_variables(self) -> str:
         env_vars = self.load_params.get_env_vars()
@@ -216,7 +215,7 @@ class K6:
             return self._k6_process.running()
         return False

-    @reporter.step_deco("Wait until K6 process end")
+    @reporter.step("Wait until K6 process end")
     @wait_for_success(K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout")
     def _wait_until_process_end(self):
         return self._k6_process.running()

View file

@@ -1,11 +1,6 @@
-import logging
-
+from frostfs_testlib import reporter
 from frostfs_testlib.load.load_config import LoadParams, LoadScenario
 from frostfs_testlib.load.load_metrics import get_metrics_object
-from frostfs_testlib.reporter import get_reporter
-
-reporter = get_reporter()
-logger = logging.getLogger("NeoLogger")


 class LoadVerifier:
@@ -49,19 +44,11 @@ class LoadVerifier:
         if deleters and not delete_operations:
             issues.append(f"No any delete operation was performed")

-        if (
-            write_operations
-            and writers
-            and write_errors / write_operations * 100 > self.load_params.error_threshold
-        ):
+        if write_operations and writers and write_errors / write_operations * 100 > self.load_params.error_threshold:
             issues.append(
                 f"Write error rate is greater than threshold: {write_errors / write_operations * 100} > {self.load_params.error_threshold}"
             )
-        if (
-            read_operations
-            and readers
-            and read_errors / read_operations * 100 > self.load_params.error_threshold
-        ):
+        if read_operations and readers and read_errors / read_operations * 100 > self.load_params.error_threshold:
             issues.append(
                 f"Read error rate is greater than threshold: {read_errors / read_operations * 100} > {self.load_params.error_threshold}"
             )
@@ -89,9 +76,7 @@ class LoadVerifier:
         )
         return verify_issues

-    def _collect_verify_issues_on_process(
-        self, label, load_summary, verification_summary
-    ) -> list[str]:
+    def _collect_verify_issues_on_process(self, label, load_summary, verification_summary) -> list[str]:
         issues = []

         load_metrics = get_metrics_object(self.load_params.scenario, load_summary)
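
The collapsed conditionals keep the same threshold semantics as before; a small standalone example of the check with illustrative numbers:

write_operations, write_errors = 2000, 30
error_threshold = 1.0  # percent

# 30 / 2000 * 100 = 1.5, which exceeds the 1.0% threshold, so this run is flagged.
if write_operations and write_errors / write_operations * 100 > error_threshold:
    print(f"Write error rate is greater than threshold: {write_errors / write_operations * 100} > {error_threshold}")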

View file

@@ -9,13 +9,13 @@ from urllib.parse import urlparse
 import yaml

+from frostfs_testlib import reporter
 from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate
 from frostfs_testlib.load.interfaces.loader import Loader
 from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
 from frostfs_testlib.load.k6 import K6
 from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType
 from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader
-from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources import optionals
 from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
 from frostfs_testlib.resources.common import STORAGE_USER_NAME
@@ -31,17 +31,15 @@ from frostfs_testlib.testing.test_control import retry
 from frostfs_testlib.utils import datetime_utils
 from frostfs_testlib.utils.file_keeper import FileKeeper

-reporter = get_reporter()
-

 class RunnerBase(ScenarioRunner):
     k6_instances: list[K6]

-    @reporter.step_deco("Run preset on loaders")
+    @reporter.step("Run preset on loaders")
     def preset(self):
         parallel([k6.preset for k6 in self.k6_instances])

-    @reporter.step_deco("Wait until load finish")
+    @reporter.step("Wait until load finish")
     def wait_until_finish(self, soft_timeout: int = 0):
         parallel([k6.wait_until_finished for k6 in self.k6_instances], soft_timeout=soft_timeout)
@@ -70,7 +68,7 @@ class DefaultRunner(RunnerBase):
         self.loaders_wallet = loaders_wallet

     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step_deco("Preparation steps")
+    @reporter.step("Preparation steps")
     def prepare(
         self,
         load_params: LoadParams,
@@ -127,7 +125,7 @@ class DefaultRunner(RunnerBase):
         ]
         shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))

-    @reporter.step_deco("Init k6 instances")
+    @reporter.step("Init k6 instances")
     def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
         self.k6_instances = []
         cycled_loaders = itertools.cycle(self.loaders)
@@ -271,7 +269,7 @@ class LocalRunner(RunnerBase):
         self.nodes_under_load = nodes_under_load

     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step_deco("Preparation steps")
+    @reporter.step("Preparation steps")
     def prepare(
         self,
         load_params: LoadParams,
@@ -298,7 +296,7 @@ class LocalRunner(RunnerBase):
         return True

-    @reporter.step_deco("Prepare node {cluster_node}")
+    @reporter.step("Prepare node {cluster_node}")
     def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams):
         shell = cluster_node.host.get_shell()
@@ -323,7 +321,7 @@ class LocalRunner(RunnerBase):
         shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}')
         shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}")

-    @reporter.step_deco("Init k6 instances")
+    @reporter.step("Init k6 instances")
     def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
         self.k6_instances = []
         futures = parallel(
@@ -369,12 +367,12 @@ class LocalRunner(RunnerBase):
         with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"):
             time.sleep(wait_after_start_time)

-    @reporter.step_deco("Restore passwd on {cluster_node}")
+    @reporter.step("Restore passwd on {cluster_node}")
     def restore_passwd_on_node(self, cluster_node: ClusterNode):
         shell = cluster_node.host.get_shell()
         shell.exec("sudo chattr -i /etc/passwd")

-    @reporter.step_deco("Lock passwd on {cluster_node}")
+    @reporter.step("Lock passwd on {cluster_node}")
     def lock_passwd_on_node(self, cluster_node: ClusterNode):
         shell = cluster_node.host.get_shell()
         shell.exec("sudo chattr +i /etc/passwd")
@@ -400,19 +398,19 @@ class S3LocalRunner(LocalRunner):
     endpoints: list[str]
     k6_dir: str

-    @reporter.step_deco("Run preset on loaders")
+    @reporter.step("Run preset on loaders")
     def preset(self):
         LocalRunner.preset(self)
         with reporter.step(f"Resolve containers in preset"):
             parallel(self._resolve_containers_in_preset, self.k6_instances)

-    @reporter.step_deco("Resolve containers in preset")
+    @reporter.step("Resolve containers in preset")
     def _resolve_containers_in_preset(self, k6_instance: K6):
         k6_instance.shell.exec(
             f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}"
         )

-    @reporter.step_deco("Init k6 instances")
+    @reporter.step("Init k6 instances")
     def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
         self.k6_instances = []
         futures = parallel(
@@ -448,7 +446,7 @@ class S3LocalRunner(LocalRunner):
         )

     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step_deco("Preparation steps")
+    @reporter.step("Preparation steps")
     def prepare(
         self,
         load_params: LoadParams,
@@ -464,7 +462,7 @@ class S3LocalRunner(LocalRunner):
         parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, s3_public_keys, grpc_peer)

-    @reporter.step_deco("Prepare node {cluster_node}")
+    @reporter.step("Prepare node {cluster_node}")
     def prepare_node(
         self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, s3_public_keys: list[str], grpc_peer: str
     ):

View file

@@ -8,18 +8,14 @@ from tenacity import retry
 from tenacity.stop import stop_after_attempt
 from tenacity.wait import wait_fixed

-from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib import reporter
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.shell.command_inspectors import SuInspector
 from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions

-reporter = get_reporter()
-

 class RemoteProcess:
-    def __init__(
-        self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector]
-    ):
+    def __init__(self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector]):
         self.process_dir = process_dir
         self.cmd = cmd
         self.stdout_last_line_number = 0
@@ -32,10 +28,8 @@ class RemoteProcess:
         self.cmd_inspectors: list[CommandInspector] = [cmd_inspector] if cmd_inspector else []

     @classmethod
-    @reporter.step_deco("Create remote process")
-    def create(
-        cls, command: str, shell: Shell, working_dir: str = "/tmp", user: Optional[str] = None
-    ) -> RemoteProcess:
+    @reporter.step("Create remote process")
+    def create(cls, command: str, shell: Shell, working_dir: str = "/tmp", user: Optional[str] = None) -> RemoteProcess:
         """
         Create a process on a remote host.
@@ -68,7 +62,7 @@ class RemoteProcess:
         remote_process.pid = remote_process._get_pid()
         return remote_process

-    @reporter.step_deco("Get process stdout")
+    @reporter.step("Get process stdout")
     def stdout(self, full: bool = False) -> str:
         """
         Method to get process stdout, either fresh info or full.
@@ -100,7 +94,7 @@ class RemoteProcess:
             return resulted_stdout
         return ""

-    @reporter.step_deco("Get process stderr")
+    @reporter.step("Get process stderr")
     def stderr(self, full: bool = False) -> str:
         """
         Method to get process stderr, either fresh info or full.
@@ -131,7 +125,7 @@ class RemoteProcess:
             return resulted_stderr
         return ""

-    @reporter.step_deco("Get process rc")
+    @reporter.step("Get process rc")
     def rc(self) -> Optional[int]:
         if self.proc_rc is not None:
             return self.proc_rc
@@ -148,11 +142,11 @@ class RemoteProcess:
         self.proc_rc = int(terminal.stdout)
         return self.proc_rc

-    @reporter.step_deco("Check if process is running")
+    @reporter.step("Check if process is running")
     def running(self) -> bool:
         return self.rc() is None

-    @reporter.step_deco("Send signal to process")
+    @reporter.step("Send signal to process")
     def send_signal(self, signal: int) -> None:
         kill_res = self.shell.exec(
             f"kill -{signal} {self.pid}",
@@ -161,27 +155,23 @@ class RemoteProcess:
         if "No such process" in kill_res.stderr:
             return
         if kill_res.return_code:
-            raise AssertionError(
-                f"Signal {signal} not sent. Return code of kill: {kill_res.return_code}"
-            )
+            raise AssertionError(f"Signal {signal} not sent. Return code of kill: {kill_res.return_code}")

-    @reporter.step_deco("Stop process")
+    @reporter.step("Stop process")
     def stop(self) -> None:
         self.send_signal(15)

-    @reporter.step_deco("Kill process")
+    @reporter.step("Kill process")
     def kill(self) -> None:
         self.send_signal(9)

-    @reporter.step_deco("Clear process directory")
+    @reporter.step("Clear process directory")
     def clear(self) -> None:
         if self.process_dir == "/":
             raise AssertionError(f"Invalid path to delete: {self.process_dir}")
-        self.shell.exec(
-            f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)
-        )
+        self.shell.exec(f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors))

-    @reporter.step_deco("Start remote process")
+    @reporter.step("Start remote process")
     def _start_process(self) -> None:
         self.shell.exec(
             f"nohup {self.process_dir}/command.sh </dev/null "
@@ -190,29 +180,21 @@ class RemoteProcess:
             CommandOptions(extra_inspectors=self.cmd_inspectors),
         )

-    @reporter.step_deco("Create process directory")
+    @reporter.step("Create process directory")
     def _create_process_dir(self) -> None:
-        self.shell.exec(
-            f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)
-        )
-        self.shell.exec(
-            f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)
-        )
-        terminal = self.shell.exec(
-            f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)
-        )
+        self.shell.exec(f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors))
+        self.shell.exec(f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors))
+        terminal = self.shell.exec(f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors))
         self.process_dir = terminal.stdout.strip()

-    @reporter.step_deco("Get pid")
+    @reporter.step("Get pid")
     @retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True)
     def _get_pid(self) -> str:
-        terminal = self.shell.exec(
-            f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors)
-        )
+        terminal = self.shell.exec(f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors))
         assert terminal.stdout, f"invalid pid: {terminal.stdout}"
         return terminal.stdout.strip()

-    @reporter.step_deco("Generate command script")
+    @reporter.step("Generate command script")
     def _generate_command_script(self, command: str) -> None:
         command = command.replace('"', '\\"').replace("\\", "\\\\")
         script = (

View file

@@ -1,3 +1,5 @@
+from typing import Any
+
 from frostfs_testlib.reporter.allure_handler import AllureHandler
 from frostfs_testlib.reporter.interfaces import ReporterHandler
 from frostfs_testlib.reporter.reporter import Reporter
@@ -20,3 +22,7 @@ def get_reporter() -> Reporter:

 def step(title: str):
     return __reporter.step(title)
+
+
+def attach(content: Any, file_name: str):
+    return __reporter.attach(content, file_name)
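
Together with the module-level `step` helper above, the new `attach` completes the API that the rest of this commit switches to; a short usage sketch (the step title and attachment content are illustrative):

from frostfs_testlib import reporter

with reporter.step("Collect node diagnostics"):
    reporter.attach("listening sockets:\n0.0.0.0:8080", "sockets.txt")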

View file

@@ -21,9 +21,14 @@ class AllureHandler(ReporterHandler):
     def attach(self, body: Any, file_name: str) -> None:
         attachment_name, extension = os.path.splitext(file_name)
+        if extension.startswith("."):
+            extension = extension[1:]
         attachment_type = self._resolve_attachment_type(extension)

-        allure.attach(body, attachment_name, attachment_type, extension)
+        if os.path.exists(body):
+            allure.attach.file(body, file_name, attachment_type, extension)
+        else:
+            allure.attach(body, attachment_name, attachment_type, extension)

     def _resolve_attachment_type(self, extension: str) -> attachment_type:
         """Try to find matching Allure attachment type by extension.

View file

@@ -6,7 +6,7 @@ from datetime import datetime
 from time import sleep
 from typing import Literal, Optional, Union

-from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib import reporter
 from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME
 from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
 from frostfs_testlib.shell import CommandOptions
@@ -15,7 +15,6 @@ from frostfs_testlib.shell.local_shell import LocalShell
 # TODO: Refactor this code to use shell instead of _cmd_run
 from frostfs_testlib.utils.cli_utils import _configure_aws_cli

-reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")

 command_options = CommandOptions(timeout=480)
@@ -28,8 +27,10 @@ class AwsCliClient(S3ClientWrapper):
     common_flags = "--no-verify-ssl --no-paginate"
     s3gate_endpoint: str

-    @reporter.step_deco("Configure S3 client (aws cli)")
-    def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str='default') -> None:
+    @reporter.step("Configure S3 client (aws cli)")
+    def __init__(
+        self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default"
+    ) -> None:
         self.s3gate_endpoint = s3gate_endpoint
         self.profile = profile
         self.local_shell = LocalShell()
@@ -42,11 +43,11 @@ class AwsCliClient(S3ClientWrapper):
         except Exception as err:
             raise RuntimeError("Error while configuring AwsCliClient") from err

-    @reporter.step_deco("Set endpoint S3 to {s3gate_endpoint}")
+    @reporter.step("Set endpoint S3 to {s3gate_endpoint}")
     def set_endpoint(self, s3gate_endpoint: str):
         self.s3gate_endpoint = s3gate_endpoint

-    @reporter.step_deco("Create bucket S3")
+    @reporter.step("Create bucket S3")
     def create_bucket(
         self,
         bucket: Optional[str] = None,
@@ -85,25 +86,25 @@ class AwsCliClient(S3ClientWrapper):
         return bucket

-    @reporter.step_deco("List buckets S3")
+    @reporter.step("List buckets S3")
     def list_buckets(self) -> list[str]:
         cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint} --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
         buckets_json = self._to_json(output)
         return [bucket["Name"] for bucket in buckets_json["Buckets"]]

-    @reporter.step_deco("Delete bucket S3")
+    @reporter.step("Delete bucket S3")
     def delete_bucket(self, bucket: str) -> None:
         cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
         self.local_shell.exec(cmd, command_options)
         sleep(S3_SYNC_WAIT_TIME)

-    @reporter.step_deco("Head bucket S3")
+    @reporter.step("Head bucket S3")
     def head_bucket(self, bucket: str) -> None:
         cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
         self.local_shell.exec(cmd)

-    @reporter.step_deco("Put bucket versioning status")
+    @reporter.step("Put bucket versioning status")
     def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None:
         cmd = (
             f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} "
@@ -112,7 +113,7 @@ class AwsCliClient(S3ClientWrapper):
         )
         self.local_shell.exec(cmd)

-    @reporter.step_deco("Get bucket versioning status")
+    @reporter.step("Get bucket versioning status")
     def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]:
         cmd = (
             f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} "
@@ -122,7 +123,7 @@ class AwsCliClient(S3ClientWrapper):
         response = self._to_json(output)
         return response.get("Status")

-    @reporter.step_deco("Put bucket tagging")
+    @reporter.step("Put bucket tagging")
     def put_bucket_tagging(self, bucket: str, tags: list) -> None:
         tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]}
         cmd = (
@@ -131,34 +132,42 @@ class AwsCliClient(S3ClientWrapper):
         )
         self.local_shell.exec(cmd)

-    @reporter.step_deco("Get bucket tagging")
+    @reporter.step("Get bucket tagging")
     def get_bucket_tagging(self, bucket: str) -> list:
         cmd = (
-            f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
+            f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} "
+            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
         )
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response.get("TagSet")

-    @reporter.step_deco("Get bucket acl")
+    @reporter.step("Get bucket acl")
     def get_bucket_acl(self, bucket: str) -> list:
-        cmd = f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
+        cmd = (
+            f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} "
+            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
+        )
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response.get("Grants")

-    @reporter.step_deco("Get bucket location")
+    @reporter.step("Get bucket location")
     def get_bucket_location(self, bucket: str) -> dict:
         cmd = (
-            f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
+            f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} "
+            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
         )
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response.get("LocationConstraint")

-    @reporter.step_deco("List objects S3")
+    @reporter.step("List objects S3")
     def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
-        cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
+        cmd = (
+            f"aws {self.common_flags} s3api list-objects --bucket {bucket} "
+            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
+        )
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
@@ -167,9 +176,12 @@ class AwsCliClient(S3ClientWrapper):
         return response if full_output else obj_list

-    @reporter.step_deco("List objects S3 v2")
+    @reporter.step("List objects S3 v2")
     def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
-        cmd = f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
+        cmd = (
+            f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} "
+            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
+        )
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
@@ -178,7 +190,7 @@ class AwsCliClient(S3ClientWrapper):
         return response if full_output else obj_list

-    @reporter.step_deco("List objects versions S3")
+    @reporter.step("List objects versions S3")
     def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict:
         cmd = (
             f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} "
@@ -188,7 +200,7 @@ class AwsCliClient(S3ClientWrapper):
         response = self._to_json(output)
         return response if full_output else response.get("Versions", [])

-    @reporter.step_deco("List objects delete markers S3")
+    @reporter.step("List objects delete markers S3")
     def list_delete_markers(self, bucket: str, full_output: bool = False) -> list:
         cmd = (
             f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} "
@@ -198,7 +210,7 @@ class AwsCliClient(S3ClientWrapper):
         response = self._to_json(output)
         return response if full_output else response.get("DeleteMarkers", [])

-    @reporter.step_deco("Copy object S3")
+    @reporter.step("Copy object S3")
     def copy_object(
         self,
         source_bucket: str,
@@ -236,7 +248,7 @@ class AwsCliClient(S3ClientWrapper):
         self.local_shell.exec(cmd, command_options)
         return key

-    @reporter.step_deco("Put object S3")
+    @reporter.step("Put object S3")
     def put_object(
         self,
         bucket: str,
@@ -280,7 +292,7 @@ class AwsCliClient(S3ClientWrapper):
         response = self._to_json(output)
         return response.get("VersionId")

-    @reporter.step_deco("Head object S3")
+    @reporter.step("Head object S3")
     def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
         version = f" --version-id {version_id}" if version_id else ""
         cmd = (
@@ -291,7 +303,7 @@ class AwsCliClient(S3ClientWrapper):
         response = self._to_json(output)
         return response

-    @reporter.step_deco("Get object S3")
+    @reporter.step("Get object S3")
     def get_object(
         self,
         bucket: str,
@@ -312,7 +324,7 @@ class AwsCliClient(S3ClientWrapper):
         response = self._to_json(output)
         return response if full_output else file_path

-    @reporter.step_deco("Get object ACL")
+    @reporter.step("Get object ACL")
     def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
         version = f" --version-id {version_id}" if version_id else ""
         cmd = (
@@ -323,7 +335,7 @@ class AwsCliClient(S3ClientWrapper):
         response = self._to_json(output)
         return response.get("Grants")

-    @reporter.step_deco("Put object ACL")
+    @reporter.step("Put object ACL")
     def put_object_acl(
         self,
         bucket: str,
@@ -346,7 +358,7 @@ class AwsCliClient(S3ClientWrapper):
         response = self._to_json(output)
         return response.get("Grants")

-    @reporter.step_deco("Put bucket ACL")
+    @reporter.step("Put bucket ACL")
     def put_bucket_acl(
         self,
         bucket: str,
@@ -354,7 +366,10 @@ class AwsCliClient(S3ClientWrapper):
         grant_write: Optional[str] = None,
         grant_read: Optional[str] = None,
     ) -> None:
-        cmd = f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " f" --endpoint {self.s3gate_endpoint} --profile {self.profile}"
+        cmd = (
+            f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} "
+            f" --endpoint {self.s3gate_endpoint} --profile {self.profile}"
+        )
         if acl:
             cmd += f" --acl {acl}"
         if grant_write:
@@ -363,7 +378,7 @@ class AwsCliClient(S3ClientWrapper):
             cmd += f" --grant-read {grant_read}"
         self.local_shell.exec(cmd)

-    @reporter.step_deco("Delete objects S3")
+    @reporter.step("Delete objects S3")
     def delete_objects(self, bucket: str, keys: list[str]) -> dict:
         file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json")
         delete_structure = json.dumps(_make_objs_dict(keys))
@@ -380,7 +395,7 @@ class AwsCliClient(S3ClientWrapper):
         sleep(S3_SYNC_WAIT_TIME)
         return response

-    @reporter.step_deco("Delete object S3")
+    @reporter.step("Delete object S3")
     def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
         version = f" --version-id {version_id}" if version_id else ""
         cmd = (
@@ -391,7 +406,7 @@ class AwsCliClient(S3ClientWrapper):
         sleep(S3_SYNC_WAIT_TIME)
         return self._to_json(output)

-    @reporter.step_deco("Delete object versions S3")
+    @reporter.step("Delete object versions S3")
     def delete_object_versions(self, bucket: str, object_versions: list) -> dict:
         # Build deletion list in S3 format
         delete_list = {
@@ -418,13 +433,13 @@ class AwsCliClient(S3ClientWrapper):
         sleep(S3_SYNC_WAIT_TIME)
         return self._to_json(output)

-    @reporter.step_deco("Delete object versions S3 without delete markers")
+    @reporter.step("Delete object versions S3 without delete markers")
     def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None:
         # Delete objects without creating delete markers
         for object_version in object_versions:
             self.delete_object(bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"])

-    @reporter.step_deco("Get object attributes")
+    @reporter.step("Get object attributes")
     def get_object_attributes(
         self,
         bucket: str,
@@ -456,14 +471,17 @@ class AwsCliClient(S3ClientWrapper):
         else:
             return response.get(attributes[0])

-    @reporter.step_deco("Get bucket policy")
+    @reporter.step("Get bucket policy")
     def get_bucket_policy(self, bucket: str) -> dict:
-        cmd = f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
+        cmd = (
+            f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} "
+            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
+        )
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response.get("Policy")

-    @reporter.step_deco("Put bucket policy")
+    @reporter.step("Put bucket policy")
     def put_bucket_policy(self, bucket: str, policy: dict) -> None:
         # Leaving it as is was in test repo. Double dumps to escape resulting string
         # Example:
@@ -478,14 +496,17 @@ class AwsCliClient(S3ClientWrapper):
         )
         self.local_shell.exec(cmd)

-    @reporter.step_deco("Get bucket cors")
+    @reporter.step("Get bucket cors")
     def get_bucket_cors(self, bucket: str) -> dict:
-        cmd = f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
+        cmd = (
+            f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} "
+            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
+        )
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response.get("CORSRules")

-    @reporter.step_deco("Put bucket cors")
+    @reporter.step("Put bucket cors")
     def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None:
         cmd = (
             f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} "
@@ -493,14 +514,15 @@ class AwsCliClient(S3ClientWrapper):
         )
         self.local_shell.exec(cmd)

-    @reporter.step_deco("Delete bucket cors")
+    @reporter.step("Delete bucket cors")
     def delete_bucket_cors(self, bucket: str) -> None:
         cmd = (
-            f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
+            f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} "
+            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
         )
         self.local_shell.exec(cmd)

-    @reporter.step_deco("Delete bucket tagging")
+    @reporter.step("Delete bucket tagging")
     def delete_bucket_tagging(self, bucket: str) -> None:
         cmd = (
             f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} "
@@ -508,7 +530,7 @@ class AwsCliClient(S3ClientWrapper):
         )
         self.local_shell.exec(cmd)

-    @reporter.step_deco("Put object retention")
+    @reporter.step("Put object retention")
     def put_object_retention(
         self,
         bucket: str,
@@ -526,7 +548,7 @@ class AwsCliClient(S3ClientWrapper):
             cmd += " --bypass-governance-retention"
         self.local_shell.exec(cmd)

-    @reporter.step_deco("Put object legal hold")
+    @reporter.step("Put object legal hold")
     def put_object_legal_hold(
         self,
         bucket: str,
@@ -542,7 +564,7 @@ class AwsCliClient(S3ClientWrapper):
         )
         self.local_shell.exec(cmd)

-    @reporter.step_deco("Put object tagging")
+    @reporter.step("Put object tagging")
     def put_object_tagging(self, bucket: str, key: str, tags: list) -> None:
         tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
         tagging = {"TagSet": tags}
@@ -552,7 +574,7 @@ class AwsCliClient(S3ClientWrapper):
         )
         self.local_shell.exec(cmd)

-    @reporter.step_deco("Get object tagging")
+    @reporter.step("Get object tagging")
     def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
         version = f" --version-id {version_id}" if version_id else ""
         cmd = (
@@ -563,7 +585,7 @@ class AwsCliClient(S3ClientWrapper):
         response = self._to_json(output)
         return response.get("TagSet")

-    @reporter.step_deco("Delete object tagging")
+    @reporter.step("Delete object tagging")
     def delete_object_tagging(self, bucket: str, key: str) -> None:
         cmd = (
             f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} "
@@ -571,7 +593,7 @@ class AwsCliClient(S3ClientWrapper):
         )
         self.local_shell.exec(cmd)

-    @reporter.step_deco("Sync directory S3")
+    @reporter.step("Sync directory S3")
     def sync(
         self,
         bucket: str,
@@ -579,7 +601,10 @@ class AwsCliClient(S3ClientWrapper):
         acl: Optional[str] = None,
         metadata: Optional[dict] = None,
     ) -> dict:
-        cmd = f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
+        cmd = (
+            f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} "
+            f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
+        )
         if metadata:
             cmd += " --metadata"
             for key, value in metadata.items():
@@ -589,7 +614,7 @@ class AwsCliClient(S3ClientWrapper):
         output = self.local_shell.exec(cmd, command_options).stdout
         return self._to_json(output)

-    @reporter.step_deco("CP directory S3")
+    @reporter.step("CP directory S3")
     def cp(
         self,
         bucket: str,
@@ -610,7 +635,7 @@ class AwsCliClient(S3ClientWrapper):
         output = self.local_shell.exec(cmd, command_options).stdout
         return self._to_json(output)

-    @reporter.step_deco("Create multipart upload S3")
+    @reporter.step("Create multipart upload S3")
     def create_multipart_upload(self, bucket: str, key: str) -> str:
         cmd = (
             f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} "
@@ -623,7 +648,7 @@ class AwsCliClient(S3ClientWrapper):
         return response["UploadId"]

-    @reporter.step_deco("List multipart uploads S3")
+    @reporter.step("List multipart uploads S3")
     def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]:
         cmd = (
             f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} "
@@ -633,7 +658,7 @@ class AwsCliClient(S3ClientWrapper):
         response = self._to_json(output)
         return response.get("Uploads")

-    @reporter.step_deco("Abort multipart upload S3")
+    @reporter.step("Abort multipart upload S3")
     def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None:
         cmd = (
             f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} "
@@ -641,7 +666,7 @@ class AwsCliClient(S3ClientWrapper):
         )
         self.local_shell.exec(cmd)

-    @reporter.step_deco("Upload part S3")
+    @reporter.step("Upload part S3")
     def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str:
         cmd = (
             f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} "
@@ -653,7 +678,7 @@ class AwsCliClient(S3ClientWrapper):
         assert response.get("ETag"), f"Expected ETag in response:\n{response}"
         return response["ETag"]

-    @reporter.step_deco("Upload copy part S3")
+    @reporter.step("Upload copy part S3")
     def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str:
         cmd = (
             f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} "
@@ -666,7 +691,7 @@ class AwsCliClient(S3ClientWrapper):
         return response["CopyPartResult"]["ETag"]

-    @reporter.step_deco("List parts S3")
+    @reporter.step("List parts S3")
     def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]:
         cmd = (
             f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} "
@@ -679,7 +704,7 @@ class AwsCliClient(S3ClientWrapper):
         return response["Parts"]

-    @reporter.step_deco("Complete multipart upload S3")
+    @reporter.step("Complete multipart upload S3")
     def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
         file_path = os.path.join(os.getcwd(), ASSETS_DIR, "parts.json")
         parts_dict = {"Parts": [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]}
@@ -696,7 +721,7 @@ class AwsCliClient(S3ClientWrapper):
         )
         self.local_shell.exec(cmd)

-    @reporter.step_deco("Put object lock configuration")
+    @reporter.step("Put object lock configuration")
     def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict:
         cmd = (
             f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} "
@@ -705,7 +730,7 @@ class AwsCliClient(S3ClientWrapper):
         output = self.local_shell.exec(cmd).stdout
         return self._to_json(output)

-    @reporter.step_deco("Get object lock configuration")
+    @reporter.step("Get object lock configuration")
     def get_object_lock_configuration(self, bucket: str):
         cmd = (
             f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} "

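Every wrapper method above is now decorated with @reporter.step("<title>"); elsewhere in this change the titles also embed placeholders (e.g. {s3gate_endpoint}) resolved from call arguments. A minimal sketch of a decorator that supports both, purely illustrative, since the real frostfs_testlib implementation is not shown in this diff:

import functools
import inspect


def step(title: str):
    """Illustrative step decorator that formats its title from call arguments."""

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Bind the call arguments to parameter names so that
            # "{s3gate_endpoint}"-style placeholders resolve by name.
            bound = inspect.signature(func).bind(*args, **kwargs)
            bound.apply_defaults()
            print(f"STEP: {title.format(**bound.arguments)}")
            return func(*args, **kwargs)

        return wrapper

    return decorator


@step("Set endpoint S3 to {s3gate_endpoint}")
def set_endpoint(s3gate_endpoint: str) -> None:
    pass


set_endpoint("http://localhost:8080")  # prints: STEP: Set endpoint S3 to http://localhost:8080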

@@ -13,17 +13,11 @@ from botocore.config import Config
from botocore.exceptions import ClientError from botocore.exceptions import ClientError
from mypy_boto3_s3 import S3Client from mypy_boto3_s3 import S3Client
from frostfs_testlib.reporter import get_reporter from frostfs_testlib import reporter
from frostfs_testlib.resources.common import ( from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME
ASSETS_DIR,
MAX_REQUEST_ATTEMPTS,
RETRY_MODE,
S3_SYNC_WAIT_TIME,
)
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
from frostfs_testlib.utils.cli_utils import log_command_execution from frostfs_testlib.utils.cli_utils import log_command_execution
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
# Disable warnings on self-signed certificate which the # Disable warnings on self-signed certificate which the
@@ -46,9 +40,11 @@ def report_error(func):
class Boto3ClientWrapper(S3ClientWrapper): class Boto3ClientWrapper(S3ClientWrapper):
__repr_name__: str = "Boto3 client" __repr_name__: str = "Boto3 client"
@reporter.step_deco("Configure S3 client (boto3)") @reporter.step("Configure S3 client (boto3)")
@report_error @report_error
def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str='default') -> None: def __init__(
self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default"
) -> None:
self.boto3_client: S3Client = None self.boto3_client: S3Client = None
self.session = boto3.Session(profile_name=profile) self.session = boto3.Session(profile_name=profile)
self.config = Config( self.config = Config(
@@ -62,7 +58,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
self.s3gate_endpoint: str = "" self.s3gate_endpoint: str = ""
self.set_endpoint(s3gate_endpoint) self.set_endpoint(s3gate_endpoint)
@reporter.step_deco("Set endpoint S3 to {s3gate_endpoint}") @reporter.step("Set endpoint S3 to {s3gate_endpoint}")
def set_endpoint(self, s3gate_endpoint: str): def set_endpoint(self, s3gate_endpoint: str):
if self.s3gate_endpoint == s3gate_endpoint: if self.s3gate_endpoint == s3gate_endpoint:
return return
@@ -90,7 +86,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return result return result
# BUCKET METHODS # # BUCKET METHODS #
@reporter.step_deco("Create bucket S3") @reporter.step("Create bucket S3")
@report_error @report_error
def create_bucket( def create_bucket(
self, self,
@@ -118,16 +114,14 @@ class Boto3ClientWrapper(S3ClientWrapper):
elif grant_full_control: elif grant_full_control:
params.update({"GrantFullControl": grant_full_control}) params.update({"GrantFullControl": grant_full_control})
if location_constraint: if location_constraint:
params.update( params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}})
{"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}
)
s3_bucket = self.boto3_client.create_bucket(**params) s3_bucket = self.boto3_client.create_bucket(**params)
log_command_execution(f"Created S3 bucket {bucket}", s3_bucket) log_command_execution(f"Created S3 bucket {bucket}", s3_bucket)
sleep(S3_SYNC_WAIT_TIME) sleep(S3_SYNC_WAIT_TIME)
return bucket return bucket
@reporter.step_deco("List buckets S3") @reporter.step("List buckets S3")
@report_error @report_error
def list_buckets(self) -> list[str]: def list_buckets(self) -> list[str]:
found_buckets = [] found_buckets = []
@@ -140,20 +134,20 @@ class Boto3ClientWrapper(S3ClientWrapper):
return found_buckets return found_buckets
@reporter.step_deco("Delete bucket S3") @reporter.step("Delete bucket S3")
@report_error @report_error
def delete_bucket(self, bucket: str) -> None: def delete_bucket(self, bucket: str) -> None:
response = self.boto3_client.delete_bucket(Bucket=bucket) response = self.boto3_client.delete_bucket(Bucket=bucket)
log_command_execution("S3 Delete bucket result", response) log_command_execution("S3 Delete bucket result", response)
sleep(S3_SYNC_WAIT_TIME) sleep(S3_SYNC_WAIT_TIME)
@reporter.step_deco("Head bucket S3") @reporter.step("Head bucket S3")
@report_error @report_error
def head_bucket(self, bucket: str) -> None: def head_bucket(self, bucket: str) -> None:
response = self.boto3_client.head_bucket(Bucket=bucket) response = self.boto3_client.head_bucket(Bucket=bucket)
log_command_execution("S3 Head bucket result", response) log_command_execution("S3 Head bucket result", response)
@reporter.step_deco("Put bucket versioning status") @reporter.step("Put bucket versioning status")
@report_error @report_error
def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None:
response = self.boto3_client.put_bucket_versioning( response = self.boto3_client.put_bucket_versioning(
@@ -161,7 +155,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
) )
log_command_execution("S3 Set bucket versioning to", response) log_command_execution("S3 Set bucket versioning to", response)
@reporter.step_deco("Get bucket versioning status") @reporter.step("Get bucket versioning status")
@report_error @report_error
def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]:
response = self.boto3_client.get_bucket_versioning(Bucket=bucket) response = self.boto3_client.get_bucket_versioning(Bucket=bucket)
@@ -169,7 +163,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
log_command_execution("S3 Got bucket versioning status", response) log_command_execution("S3 Got bucket versioning status", response)
return status return status
@reporter.step_deco("Put bucket tagging") @reporter.step("Put bucket tagging")
@report_error @report_error
def put_bucket_tagging(self, bucket: str, tags: list) -> None: def put_bucket_tagging(self, bucket: str, tags: list) -> None:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
@@ -177,27 +171,27 @@ class Boto3ClientWrapper(S3ClientWrapper):
response = self.boto3_client.put_bucket_tagging(Bucket=bucket, Tagging=tagging) response = self.boto3_client.put_bucket_tagging(Bucket=bucket, Tagging=tagging)
log_command_execution("S3 Put bucket tagging", response) log_command_execution("S3 Put bucket tagging", response)
@reporter.step_deco("Get bucket tagging") @reporter.step("Get bucket tagging")
@report_error @report_error
def get_bucket_tagging(self, bucket: str) -> list: def get_bucket_tagging(self, bucket: str) -> list:
response = self.boto3_client.get_bucket_tagging(Bucket=bucket) response = self.boto3_client.get_bucket_tagging(Bucket=bucket)
log_command_execution("S3 Get bucket tagging", response) log_command_execution("S3 Get bucket tagging", response)
return response.get("TagSet") return response.get("TagSet")
@reporter.step_deco("Get bucket acl") @reporter.step("Get bucket acl")
@report_error @report_error
def get_bucket_acl(self, bucket: str) -> list: def get_bucket_acl(self, bucket: str) -> list:
response = self.boto3_client.get_bucket_acl(Bucket=bucket) response = self.boto3_client.get_bucket_acl(Bucket=bucket)
log_command_execution("S3 Get bucket acl", response) log_command_execution("S3 Get bucket acl", response)
return response.get("Grants") return response.get("Grants")
@reporter.step_deco("Delete bucket tagging") @reporter.step("Delete bucket tagging")
@report_error @report_error
def delete_bucket_tagging(self, bucket: str) -> None: def delete_bucket_tagging(self, bucket: str) -> None:
response = self.boto3_client.delete_bucket_tagging(Bucket=bucket) response = self.boto3_client.delete_bucket_tagging(Bucket=bucket)
log_command_execution("S3 Delete bucket tagging", response) log_command_execution("S3 Delete bucket tagging", response)
@reporter.step_deco("Put bucket ACL") @reporter.step("Put bucket ACL")
@report_error @report_error
def put_bucket_acl( def put_bucket_acl(
self, self,
@@ -214,60 +208,56 @@ class Boto3ClientWrapper(S3ClientWrapper):
response = self.boto3_client.put_bucket_acl(**params) response = self.boto3_client.put_bucket_acl(**params)
log_command_execution("S3 ACL bucket result", response) log_command_execution("S3 ACL bucket result", response)
@reporter.step_deco("Put object lock configuration") @reporter.step("Put object lock configuration")
@report_error @report_error
def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict:
response = self.boto3_client.put_object_lock_configuration( response = self.boto3_client.put_object_lock_configuration(Bucket=bucket, ObjectLockConfiguration=configuration)
Bucket=bucket, ObjectLockConfiguration=configuration
)
log_command_execution("S3 put_object_lock_configuration result", response) log_command_execution("S3 put_object_lock_configuration result", response)
return response return response
@reporter.step_deco("Get object lock configuration") @reporter.step("Get object lock configuration")
@report_error @report_error
def get_object_lock_configuration(self, bucket: str) -> dict: def get_object_lock_configuration(self, bucket: str) -> dict:
response = self.boto3_client.get_object_lock_configuration(Bucket=bucket) response = self.boto3_client.get_object_lock_configuration(Bucket=bucket)
log_command_execution("S3 get_object_lock_configuration result", response) log_command_execution("S3 get_object_lock_configuration result", response)
return response.get("ObjectLockConfiguration") return response.get("ObjectLockConfiguration")
@reporter.step_deco("Get bucket policy") @reporter.step("Get bucket policy")
@report_error @report_error
def get_bucket_policy(self, bucket: str) -> str: def get_bucket_policy(self, bucket: str) -> str:
response = self.boto3_client.get_bucket_policy(Bucket=bucket) response = self.boto3_client.get_bucket_policy(Bucket=bucket)
log_command_execution("S3 get_bucket_policy result", response) log_command_execution("S3 get_bucket_policy result", response)
return response.get("Policy") return response.get("Policy")
@reporter.step_deco("Put bucket policy") @reporter.step("Put bucket policy")
@report_error @report_error
def put_bucket_policy(self, bucket: str, policy: dict) -> None: def put_bucket_policy(self, bucket: str, policy: dict) -> None:
response = self.boto3_client.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy)) response = self.boto3_client.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy))
log_command_execution("S3 put_bucket_policy result", response) log_command_execution("S3 put_bucket_policy result", response)
return response return response
@reporter.step_deco("Get bucket cors") @reporter.step("Get bucket cors")
@report_error @report_error
def get_bucket_cors(self, bucket: str) -> dict: def get_bucket_cors(self, bucket: str) -> dict:
response = self.boto3_client.get_bucket_cors(Bucket=bucket) response = self.boto3_client.get_bucket_cors(Bucket=bucket)
log_command_execution("S3 get_bucket_cors result", response) log_command_execution("S3 get_bucket_cors result", response)
return response.get("CORSRules") return response.get("CORSRules")
@reporter.step_deco("Get bucket location") @reporter.step("Get bucket location")
@report_error @report_error
def get_bucket_location(self, bucket: str) -> str: def get_bucket_location(self, bucket: str) -> str:
response = self.boto3_client.get_bucket_location(Bucket=bucket) response = self.boto3_client.get_bucket_location(Bucket=bucket)
log_command_execution("S3 get_bucket_location result", response) log_command_execution("S3 get_bucket_location result", response)
return response.get("LocationConstraint") return response.get("LocationConstraint")
@reporter.step_deco("Put bucket cors") @reporter.step("Put bucket cors")
@report_error @report_error
def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None:
response = self.boto3_client.put_bucket_cors( response = self.boto3_client.put_bucket_cors(Bucket=bucket, CORSConfiguration=cors_configuration)
Bucket=bucket, CORSConfiguration=cors_configuration
)
log_command_execution("S3 put_bucket_cors result", response) log_command_execution("S3 put_bucket_cors result", response)
return response return response
@reporter.step_deco("Delete bucket cors") @reporter.step("Delete bucket cors")
@report_error @report_error
def delete_bucket_cors(self, bucket: str) -> None: def delete_bucket_cors(self, bucket: str) -> None:
response = self.boto3_client.delete_bucket_cors(Bucket=bucket) response = self.boto3_client.delete_bucket_cors(Bucket=bucket)
@@ -276,7 +266,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
# END OF BUCKET METHODS # # END OF BUCKET METHODS #
# OBJECT METHODS # # OBJECT METHODS #
@reporter.step_deco("List objects S3 v2") @reporter.step("List objects S3 v2")
@report_error @report_error
def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
response = self.boto3_client.list_objects_v2(Bucket=bucket) response = self.boto3_client.list_objects_v2(Bucket=bucket)
@@ -287,7 +277,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response if full_output else obj_list return response if full_output else obj_list
@reporter.step_deco("List objects S3") @reporter.step("List objects S3")
@report_error @report_error
def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
response = self.boto3_client.list_objects(Bucket=bucket) response = self.boto3_client.list_objects(Bucket=bucket)
@@ -298,21 +288,21 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response if full_output else obj_list return response if full_output else obj_list
@reporter.step_deco("List objects versions S3") @reporter.step("List objects versions S3")
@report_error @report_error
def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict:
response = self.boto3_client.list_object_versions(Bucket=bucket) response = self.boto3_client.list_object_versions(Bucket=bucket)
log_command_execution("S3 List objects versions result", response) log_command_execution("S3 List objects versions result", response)
return response if full_output else response.get("Versions", []) return response if full_output else response.get("Versions", [])
@reporter.step_deco("List objects delete markers S3") @reporter.step("List objects delete markers S3")
@report_error @report_error
def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: def list_delete_markers(self, bucket: str, full_output: bool = False) -> list:
response = self.boto3_client.list_object_versions(Bucket=bucket) response = self.boto3_client.list_object_versions(Bucket=bucket)
log_command_execution("S3 List objects delete markers result", response) log_command_execution("S3 List objects delete markers result", response)
return response if full_output else response.get("DeleteMarkers", []) return response if full_output else response.get("DeleteMarkers", [])
@reporter.step_deco("Put object S3") @reporter.step("Put object S3")
@report_error @report_error
def put_object( def put_object(
self, self,
@@ -343,7 +333,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
log_command_execution("S3 Put object result", response) log_command_execution("S3 Put object result", response)
return response.get("VersionId") return response.get("VersionId")
@reporter.step_deco("Head object S3") @reporter.step("Head object S3")
@report_error @report_error
def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
params = { params = {
@@ -355,7 +345,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
log_command_execution("S3 Head object result", response) log_command_execution("S3 Head object result", response)
return response return response
@reporter.step_deco("Delete object S3") @reporter.step("Delete object S3")
@report_error @report_error
def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
params = { params = {
@@ -368,7 +358,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
sleep(S3_SYNC_WAIT_TIME) sleep(S3_SYNC_WAIT_TIME)
return response return response
@reporter.step_deco("Delete objects S3") @reporter.step("Delete objects S3")
@report_error @report_error
def delete_objects(self, bucket: str, keys: list[str]) -> dict: def delete_objects(self, bucket: str, keys: list[str]) -> dict:
response = self.boto3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(keys)) response = self.boto3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(keys))
@@ -379,7 +369,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
sleep(S3_SYNC_WAIT_TIME) sleep(S3_SYNC_WAIT_TIME)
return response return response
@reporter.step_deco("Delete object versions S3") @reporter.step("Delete object versions S3")
@report_error @report_error
def delete_object_versions(self, bucket: str, object_versions: list) -> dict: def delete_object_versions(self, bucket: str, object_versions: list) -> dict:
# Build deletion list in S3 format # Build deletion list in S3 format
@@ -396,7 +386,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
log_command_execution("S3 Delete objects result", response) log_command_execution("S3 Delete objects result", response)
return response return response
@reporter.step_deco("Delete object versions S3 without delete markers") @reporter.step("Delete object versions S3 without delete markers")
@report_error @report_error
def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None:
# Delete objects without creating delete markers # Delete objects without creating delete markers
@@ -406,7 +396,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
) )
log_command_execution("S3 Delete object result", response) log_command_execution("S3 Delete object result", response)
@reporter.step_deco("Put object ACL") @reporter.step("Put object ACL")
@report_error @report_error
def put_object_acl( def put_object_acl(
self, self,
@@ -419,7 +409,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
# pytest.skip("Method put_object_acl is not supported by boto3 client") # pytest.skip("Method put_object_acl is not supported by boto3 client")
raise NotImplementedError("Unsupported for boto3 client") raise NotImplementedError("Unsupported for boto3 client")
@reporter.step_deco("Get object ACL") @reporter.step("Get object ACL")
@report_error @report_error
def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
params = { params = {
@@ -431,7 +421,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
log_command_execution("S3 ACL objects result", response) log_command_execution("S3 ACL objects result", response)
return response.get("Grants") return response.get("Grants")
@reporter.step_deco("Copy object S3") @reporter.step("Copy object S3")
@report_error @report_error
def copy_object( def copy_object(
self, self,
@@ -460,7 +450,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
log_command_execution("S3 Copy objects result", response) log_command_execution("S3 Copy objects result", response)
return key return key
@reporter.step_deco("Get object S3") @reporter.step("Get object S3")
@report_error @report_error
def get_object( def get_object(
self, self,
@@ -478,8 +468,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
params = { params = {
self._to_s3_param(param): value self._to_s3_param(param): value
for param, value in {**locals(), **{"Range": range_str}}.items() for param, value in {**locals(), **{"Range": range_str}}.items()
if param not in ["self", "object_range", "full_output", "range_str", "filename"] if param not in ["self", "object_range", "full_output", "range_str", "filename"] and value is not None
and value is not None
} }
response = self.boto3_client.get_object(**params) response = self.boto3_client.get_object(**params)
log_command_execution("S3 Get objects result", response) log_command_execution("S3 Get objects result", response)
@@ -491,7 +480,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
chunk = response["Body"].read(1024) chunk = response["Body"].read(1024)
return response if full_output else filename return response if full_output else filename
@reporter.step_deco("Create multipart upload S3") @reporter.step("Create multipart upload S3")
@report_error @report_error
def create_multipart_upload(self, bucket: str, key: str) -> str: def create_multipart_upload(self, bucket: str, key: str) -> str:
response = self.boto3_client.create_multipart_upload(Bucket=bucket, Key=key) response = self.boto3_client.create_multipart_upload(Bucket=bucket, Key=key)
@@ -500,7 +489,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response["UploadId"] return response["UploadId"]
@reporter.step_deco("List multipart uploads S3") @reporter.step("List multipart uploads S3")
@report_error @report_error
def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]:
response = self.boto3_client.list_multipart_uploads(Bucket=bucket) response = self.boto3_client.list_multipart_uploads(Bucket=bucket)
@@ -508,19 +497,15 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response.get("Uploads") return response.get("Uploads")
@reporter.step_deco("Abort multipart upload S3") @reporter.step("Abort multipart upload S3")
@report_error @report_error
def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None:
response = self.boto3_client.abort_multipart_upload( response = self.boto3_client.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id)
Bucket=bucket, Key=key, UploadId=upload_id
)
log_command_execution("S3 Abort multipart upload", response) log_command_execution("S3 Abort multipart upload", response)
@reporter.step_deco("Upload part S3") @reporter.step("Upload part S3")
@report_error @report_error
def upload_part( def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str:
self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str
) -> str:
with open(filepath, "rb") as put_file: with open(filepath, "rb") as put_file:
body = put_file.read() body = put_file.read()
@@ -536,11 +521,9 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response["ETag"] return response["ETag"]
@reporter.step_deco("Upload copy part S3") @reporter.step("Upload copy part S3")
@report_error @report_error
def upload_part_copy( def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str:
self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str
) -> str:
response = self.boto3_client.upload_part_copy( response = self.boto3_client.upload_part_copy(
UploadId=upload_id, UploadId=upload_id,
Bucket=bucket, Bucket=bucket,
@@ -549,13 +532,11 @@ class Boto3ClientWrapper(S3ClientWrapper):
CopySource=copy_source, CopySource=copy_source,
) )
log_command_execution("S3 Upload copy part", response) log_command_execution("S3 Upload copy part", response)
assert response.get("CopyPartResult", []).get( assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}"
"ETag"
), f"Expected ETag in response:\n{response}"
return response["CopyPartResult"]["ETag"] return response["CopyPartResult"]["ETag"]
@reporter.step_deco("List parts S3") @reporter.step("List parts S3")
@report_error @report_error
def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]:
response = self.boto3_client.list_parts(UploadId=upload_id, Bucket=bucket, Key=key) response = self.boto3_client.list_parts(UploadId=upload_id, Bucket=bucket, Key=key)
@@ -564,7 +545,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response["Parts"] return response["Parts"]
@reporter.step_deco("Complete multipart upload S3") @reporter.step("Complete multipart upload S3")
@report_error @report_error
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]
@@ -573,7 +554,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
) )
log_command_execution("S3 Complete multipart upload", response) log_command_execution("S3 Complete multipart upload", response)
@reporter.step_deco("Put object retention") @reporter.step("Put object retention")
@report_error @report_error
def put_object_retention( def put_object_retention(
self, self,
@@ -591,7 +572,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
response = self.boto3_client.put_object_retention(**params) response = self.boto3_client.put_object_retention(**params)
log_command_execution("S3 Put object retention ", response) log_command_execution("S3 Put object retention ", response)
@reporter.step_deco("Put object legal hold") @reporter.step("Put object legal hold")
@report_error @report_error
def put_object_legal_hold( def put_object_legal_hold(
self, self,
@@ -609,7 +590,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
response = self.boto3_client.put_object_legal_hold(**params) response = self.boto3_client.put_object_legal_hold(**params)
log_command_execution("S3 Put object legal hold ", response) log_command_execution("S3 Put object legal hold ", response)
@reporter.step_deco("Put object tagging") @reporter.step("Put object tagging")
@report_error @report_error
def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: def put_object_tagging(self, bucket: str, key: str, tags: list) -> None:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
@@ -617,7 +598,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging) response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging)
log_command_execution("S3 Put object tagging", response) log_command_execution("S3 Put object tagging", response)
@reporter.step_deco("Get object tagging") @reporter.step("Get object tagging")
@report_error @report_error
def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
params = { params = {
@@ -629,13 +610,13 @@ class Boto3ClientWrapper(S3ClientWrapper):
log_command_execution("S3 Get object tagging", response) log_command_execution("S3 Get object tagging", response)
return response.get("TagSet") return response.get("TagSet")
@reporter.step_deco("Delete object tagging") @reporter.step("Delete object tagging")
@report_error @report_error
def delete_object_tagging(self, bucket: str, key: str) -> None: def delete_object_tagging(self, bucket: str, key: str) -> None:
response = self.boto3_client.delete_object_tagging(Bucket=bucket, Key=key) response = self.boto3_client.delete_object_tagging(Bucket=bucket, Key=key)
log_command_execution("S3 Delete object tagging", response) log_command_execution("S3 Delete object tagging", response)
@reporter.step_deco("Get object attributes") @reporter.step("Get object attributes")
@report_error @report_error
def get_object_attributes( def get_object_attributes(
self, self,
@@ -650,7 +631,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
logger.warning("Method get_object_attributes is not supported by boto3 client") logger.warning("Method get_object_attributes is not supported by boto3 client")
return {} return {}
@reporter.step_deco("Sync directory S3") @reporter.step("Sync directory S3")
@report_error @report_error
def sync( def sync(
self, self,
@@ -661,7 +642,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
) -> dict: ) -> dict:
raise NotImplementedError("Sync is not supported for boto3 client") raise NotImplementedError("Sync is not supported for boto3 client")
@reporter.step_deco("CP directory S3") @reporter.step("CP directory S3")
@report_error @report_error
def cp( def cp(
self, self,

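Several methods above build their boto3 keyword arguments straight from locals(): parameter names are converted with _to_s3_param and unset values are dropped, as in the get_object hunk. A self-contained sketch of that pattern, where the snake_case-to-CamelCase conversion is an assumption about what _to_s3_param does:

from typing import Optional


def _to_s3_param(param: str) -> str:
    # Assumed behaviour: snake_case -> CamelCase, e.g. version_id -> VersionId.
    return "".join(part.capitalize() for part in param.split("_"))


def build_params(bucket: str, key: str, version_id: Optional[str] = None) -> dict:
    # The comprehension's iterable is evaluated in this function's scope, so
    # locals() captures bucket/key/version_id; None values are dropped.
    return {_to_s3_param(param): value for param, value in locals().items() if value is not None}


print(build_params("my-bucket", "my-key"))  # {'Bucket': 'my-bucket', 'Key': 'my-key'}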

@@ -6,11 +6,10 @@ from typing import IO, Optional
import pexpect import pexpect
from frostfs_testlib.reporter import get_reporter from frostfs_testlib import reporter
from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell
logger = logging.getLogger("frostfs.testlib.shell") logger = logging.getLogger("frostfs.testlib.shell")
reporter = get_reporter()
class LocalShell(Shell): class LocalShell(Shell):


@@ -9,11 +9,10 @@ from typing import ClassVar, Optional, Tuple
from paramiko import AutoAddPolicy, Channel, ECDSAKey, Ed25519Key, PKey, RSAKey, SSHClient, SSHException, ssh_exception from paramiko import AutoAddPolicy, Channel, ECDSAKey, Ed25519Key, PKey, RSAKey, SSHClient, SSHException, ssh_exception
from paramiko.ssh_exception import AuthenticationException from paramiko.ssh_exception import AuthenticationException
from frostfs_testlib.reporter import get_reporter from frostfs_testlib import reporter
from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell, SshCredentials from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell, SshCredentials
logger = logging.getLogger("frostfs.testlib.shell") logger = logging.getLogger("frostfs.testlib.shell")
reporter = get_reporter()
class SshConnectionProvider: class SshConnectionProvider:

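Both shell modules above now import the shared reporter object instead of constructing one per module. A hedged sketch of the package-level singleton that import style relies on; the names below are stand-ins, since only the import change is visible in the diff:

class Reporter:
    """Stand-in for the testlib reporter (illustrative, not the real class)."""

    def step(self, title: str):
        def decorator(func):
            return func  # the real decorator would wrap the call and record a report step

        return decorator


# frostfs_testlib/__init__.py would create the shared instance once:
reporter = Reporter()

# and every call site imports that same object:
# from frostfs_testlib import reporter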

@@ -8,8 +8,8 @@ from typing import List, Optional, Union
import base58 import base58
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
@@ -22,11 +22,10 @@ from frostfs_testlib.storage.dataclasses.acl import (
) )
from frostfs_testlib.utils import wallet_utils from frostfs_testlib.utils import wallet_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@reporter.step_deco("Get extended ACL") @reporter.step("Get extended ACL")
def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]: def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]:
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
try: try:
@@ -40,7 +39,7 @@ def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optiona
return result.stdout return result.stdout
@reporter.step_deco("Set extended ACL") @reporter.step("Set extended ACL")
def set_eacl( def set_eacl(
wallet_path: str, wallet_path: str,
cid: str, cid: str,
@@ -165,24 +164,20 @@ def eacl_rules(access: str, verbs: list, user: str) -> list[str]:
return rules return rules
def sign_bearer( def sign_bearer(shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None:
shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG)
) -> None:
frostfscli = FrostfsCli(
shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
)
frostfscli.util.sign_bearer_token( frostfscli.util.sign_bearer_token(
wallet=wallet_path, from_file=eacl_rules_file_from, to_file=eacl_rules_file_to, json=json wallet=wallet_path, from_file=eacl_rules_file_from, to_file=eacl_rules_file_to, json=json
) )
@reporter.step_deco("Wait for eACL cache expired") @reporter.step("Wait for eACL cache expired")
def wait_for_cache_expired(): def wait_for_cache_expired():
sleep(FROSTFS_CONTRACT_CACHE_TIMEOUT) sleep(FROSTFS_CONTRACT_CACHE_TIMEOUT)
return return
@reporter.step_deco("Return bearer token in base64 to caller") @reporter.step("Return bearer token in base64 to caller")
def bearer_token_base64_from_file( def bearer_token_base64_from_file(
bearer_path: str, bearer_path: str,
) -> str: ) -> str:

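A hedged usage sketch tying the eACL helpers above together; the import paths and the full set_eacl signature are assumptions extrapolated from the parameters visible in the diff:

from frostfs_testlib.shell import LocalShell
from frostfs_testlib.steps.acl import get_eacl, set_eacl, wait_for_cache_expired  # path assumed

shell = LocalShell()
wallet_path, cid, endpoint = "/path/to/wallet.json", "<container-id>", "s01.frostfs.devenv:8080"

set_eacl(wallet_path, cid, "eacl_table.json", shell, endpoint)  # remaining parameters assumed
wait_for_cache_expired()  # sleeps FROSTFS_CONTRACT_CACHE_TIMEOUT so the new table takes effect
current_rules = get_eacl(wallet_path, cid, shell, endpoint)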

@@ -5,8 +5,8 @@ from dataclasses import dataclass
from time import sleep from time import sleep
from typing import Optional, Union from typing import Optional, Union
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
@@ -17,7 +17,6 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.utils import json_utils from frostfs_testlib.utils import json_utils
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@@ -47,7 +46,7 @@ class StorageContainer:
def get_wallet_config_path(self) -> str: def get_wallet_config_path(self) -> str:
return self.storage_container_info.wallet_file.config_path return self.storage_container_info.wallet_file.config_path
@reporter.step_deco("Generate new object and put in container") @reporter.step("Generate new object and put in container")
def generate_object( def generate_object(
self, self,
size: int, size: int,
@@ -103,7 +102,7 @@ SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X"
REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X"
@reporter.step_deco("Create Container") @reporter.step("Create Container")
def create_container( def create_container(
wallet: str, wallet: str,
shell: Shell, shell: Shell,
@@ -178,9 +177,7 @@ def wait_for_container_creation(
return return
logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue") logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue")
sleep(sleep_interval) sleep(sleep_interval)
raise RuntimeError( raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting")
f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting"
)
def wait_for_container_deletion( def wait_for_container_deletion(
@@ -198,7 +195,7 @@ def wait_for_container_deletion(
raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.") raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.")
@reporter.step_deco("List Containers") @reporter.step("List Containers")
def list_containers( def list_containers(
wallet: str, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT wallet: str, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT
) -> list[str]: ) -> list[str]:
@@ -219,7 +216,7 @@ def list_containers(
return result.stdout.split() return result.stdout.split()
@reporter.step_deco("List Objects in container") @reporter.step("List Objects in container")
def list_objects( def list_objects(
wallet: str, wallet: str,
shell: Shell, shell: Shell,
@@ -240,14 +237,12 @@ def list_objects(
(list): list of containers (list): list of containers
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.list_objects( result = cli.container.list_objects(rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout)
rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout
)
logger.info(f"Container objects: \n{result}") logger.info(f"Container objects: \n{result}")
return result.stdout.split() return result.stdout.split()
@reporter.step_deco("Get Container") @reporter.step("Get Container")
def get_container( def get_container(
wallet: str, wallet: str,
cid: str, cid: str,
@@ -271,9 +266,7 @@ def get_container(
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.get( result = cli.container.get(rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout)
rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout
)
if not json_mode: if not json_mode:
return result.stdout return result.stdout
@@ -287,7 +280,7 @@ def get_container(
return container_info return container_info
@reporter.step_deco("Delete Container") @reporter.step("Delete Container")
# TODO: make the error message about a non-found container more user-friendly # TODO: make the error message about a non-found container more user-friendly
def delete_container( def delete_container(
wallet: str, wallet: str,
@@ -350,7 +343,7 @@ def _parse_cid(output: str) -> str:
return splitted[1] return splitted[1]
@reporter.step_deco("Search container by name") @reporter.step("Search container by name")
def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str): def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str):
list_cids = list_containers(wallet, shell, endpoint) list_cids = list_containers(wallet, shell, endpoint)
for cid in list_cids: for cid in list_cids:
@@ -360,7 +353,7 @@ def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str
return None return None
@reporter.step_deco("Search for nodes with a container") @reporter.step("Search for nodes with a container")
def search_nodes_with_container( def search_nodes_with_container(
wallet: str, wallet: str,
cid: str, cid: str,
@@ -370,9 +363,7 @@ def search_nodes_with_container(
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list[ClusterNode]: ) -> list[ClusterNode]:
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.search_node( result = cli.container.search_node(rpc_endpoint=endpoint, wallet=wallet, cid=cid, timeout=timeout)
rpc_endpoint=endpoint, wallet=wallet, cid=cid, timeout=timeout
)
pattern = r"[0-9]+(?:\.[0-9]+){3}" pattern = r"[0-9]+(?:\.[0-9]+){3}"
nodes_ip = list(set(re.findall(pattern, result.stdout))) nodes_ip = list(set(re.findall(pattern, result.stdout)))

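search_nodes_with_container pulls node addresses out of the CLI output with the dotted-quad regex shown above and deduplicates them; the same parsing in isolation:

import re

pattern = r"[0-9]+(?:\.[0-9]+){3}"
stdout = "node1: 192.168.0.10:8080\nnode2: 192.168.0.11:8080\nnode1: 192.168.0.10:8080"
nodes_ip = list(set(re.findall(pattern, stdout)))  # set() drops the repeated node
print(sorted(nodes_ip))  # ['192.168.0.10', '192.168.0.11']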

@@ -5,9 +5,9 @@ import re
import uuid import uuid
from typing import Any, Optional from typing import Any, Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.cli.neogo import NeoGo from frostfs_testlib.cli.neogo import NeoGo
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
@@ -16,10 +16,9 @@ from frostfs_testlib.utils import json_utils
from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
reporter = get_reporter()
@reporter.step_deco("Get object from random node") @reporter.step("Get object from random node")
def get_object_from_random_node( def get_object_from_random_node(
wallet: str, wallet: str,
cid: str, cid: str,
@@ -70,7 +69,7 @@ def get_object_from_random_node(
) )
@reporter.step_deco("Get object from {endpoint}") @reporter.step("Get object from {endpoint}")
def get_object( def get_object(
wallet: str, wallet: str,
cid: str, cid: str,
@@ -126,7 +125,7 @@ def get_object(
return file_path return file_path
@reporter.step_deco("Get Range Hash from {endpoint}") @reporter.step("Get Range Hash from {endpoint}")
def get_range_hash( def get_range_hash(
wallet: str, wallet: str,
cid: str, cid: str,
@@ -176,7 +175,7 @@ def get_range_hash(
return result.stdout.split(":")[1].strip() return result.stdout.split(":")[1].strip()
@reporter.step_deco("Put object to random node") @reporter.step("Put object to random node")
def put_object_to_random_node( def put_object_to_random_node(
wallet: str, wallet: str,
path: str, path: str,
@@ -235,7 +234,7 @@ def put_object_to_random_node(
) )
@reporter.step_deco("Put object at {endpoint} in container {cid}") @reporter.step("Put object at {endpoint} in container {cid}")
def put_object( def put_object(
wallet: str, wallet: str,
path: str, path: str,
@@ -296,7 +295,7 @@ def put_object(
return oid.strip() return oid.strip()
@reporter.step_deco("Delete object {cid}/{oid} from {endpoint}") @reporter.step("Delete object {cid}/{oid} from {endpoint}")
def delete_object( def delete_object(
wallet: str, wallet: str,
cid: str, cid: str,
@@ -344,7 +343,7 @@ def delete_object(
return tombstone.strip() return tombstone.strip()
@reporter.step_deco("Get Range") @reporter.step("Get Range")
def get_range( def get_range(
wallet: str, wallet: str,
cid: str, cid: str,
@@ -397,7 +396,7 @@ def get_range(
return range_file_path, content return range_file_path, content
@reporter.step_deco("Lock Object") @reporter.step("Lock Object")
def lock_object( def lock_object(
wallet: str, wallet: str,
cid: str, cid: str,
@@ -458,7 +457,7 @@ def lock_object(
return oid.strip() return oid.strip()
@reporter.step_deco("Search object") @reporter.step("Search object")
def search_object( def search_object(
wallet: str, wallet: str,
cid: str, cid: str,
@@ -503,9 +502,7 @@ def search_object(
cid=cid, cid=cid,
bearer=bearer, bearer=bearer,
xhdr=xhdr, xhdr=xhdr,
filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None,
if filters
else None,
session=session, session=session,
phy=phy, phy=phy,
root=root, root=root,
@@ -517,19 +514,17 @@ def search_object(
if expected_objects_list: if expected_objects_list:
if sorted(found_objects) == sorted(expected_objects_list): if sorted(found_objects) == sorted(expected_objects_list):
logger.info( logger.info(
f"Found objects list '{found_objects}' " f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'"
f"is equal for expected list '{expected_objects_list}'"
) )
else: else:
logger.warning( logger.warning(
f"Found object list {found_objects} " f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'"
f"is not equal to expected list '{expected_objects_list}'"
) )
return found_objects return found_objects
@reporter.step_deco("Get netmap netinfo") @reporter.step("Get netmap netinfo")
def get_netmap_netinfo( def get_netmap_netinfo(
wallet: str, wallet: str,
shell: Shell, shell: Shell,
@@ -581,7 +576,7 @@ def get_netmap_netinfo(
return settings return settings
@reporter.step_deco("Head object") @reporter.step("Head object")
def head_object( def head_object(
wallet: str, wallet: str,
cid: str, cid: str,
@@ -677,7 +672,7 @@ def head_object(
return json_utils.decode_simple_header(decoded) return json_utils.decode_simple_header(decoded)
@reporter.step_deco("Run neo-go dump-keys") @reporter.step("Run neo-go dump-keys")
def neo_go_dump_keys(shell: Shell, wallet: str) -> dict: def neo_go_dump_keys(shell: Shell, wallet: str) -> dict:
""" """
Run neo-go dump keys command Run neo-go dump keys command
@@ -702,7 +697,7 @@ def neo_go_dump_keys(shell: Shell, wallet: str) -> dict:
return {address_id: wallet_key} return {address_id: wallet_key}
@reporter.step_deco("Run neo-go query height") @reporter.step("Run neo-go query height")
def neo_go_query_height(shell: Shell, endpoint: str) -> dict: def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
""" """
Run neo-go query height command Run neo-go query height command
@@ -734,7 +729,7 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
} }
@reporter.step_deco("Search object nodes") @reporter.step("Search object nodes")
def get_object_nodes( def get_object_nodes(
cluster: Cluster, cluster: Cluster,
wallet: str, wallet: str,

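search_object above turns its optional filters dict into "<key> EQ <value>" clauses for the CLI; the transformation on its own:

filters = {"FileName": "cat.jpg", "FilePath": "/photos/cat.jpg"}
cli_filters = [f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None
print(cli_filters)  # ['FileName EQ cat.jpg', 'FilePath EQ /photos/cat.jpg']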

@@ -12,7 +12,7 @@
import logging import logging
from typing import Optional, Tuple from typing import Optional, Tuple
from frostfs_testlib.reporter import get_reporter from frostfs_testlib import reporter
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
@@ -20,7 +20,6 @@ from frostfs_testlib.steps.cli.object import head_object
from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.cluster import Cluster, StorageNode
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@@ -113,7 +112,7 @@ def get_complex_object_split_ranges(
return ranges return ranges
@reporter.step_deco("Get Link Object") @reporter.step("Get Link Object")
def get_link_object( def get_link_object(
wallet: str, wallet: str,
cid: str, cid: str,
@@ -166,7 +165,7 @@ def get_link_object(
return None return None
@reporter.step_deco("Get Last Object") @reporter.step("Get Last Object")
def get_last_object( def get_last_object(
wallet: str, wallet: str,
cid: str, cid: str,

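For context on get_complex_object_split_ranges above: assuming a complex object's parts are concatenated in order, each part's byte range follows from accumulating part sizes. An illustrative computation under that assumption (how the real helper obtains part sizes is outside the visible hunks):

part_sizes = [1048576, 1048576, 524288]  # hypothetical sizes of three split parts
ranges, offset = [], 0
for size in part_sizes:
    ranges.append((offset, size))  # (start offset, length) of this part
    offset += size
print(ranges)  # [(0, 1048576), (1048576, 1048576), (2097152, 524288)]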

@@ -2,8 +2,8 @@ import logging
from time import sleep from time import sleep
from typing import Optional from typing import Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import ( from frostfs_testlib.resources.cli import (
CLI_DEFAULT_TIMEOUT, CLI_DEFAULT_TIMEOUT,
FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_CONFIG_PATH,
@@ -19,11 +19,10 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import InnerRing, Morp
from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import datetime_utils, wallet_utils from frostfs_testlib.utils import datetime_utils, wallet_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@reporter.step_deco("Get epochs from nodes") @reporter.step("Get epochs from nodes")
def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]: def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]:
""" """
Get current epochs on each node. Get current epochs on each node.
@@ -41,10 +40,8 @@ def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]:
return epochs_by_node return epochs_by_node
@reporter.step_deco("Ensure fresh epoch") @reporter.step("Ensure fresh epoch")
def ensure_fresh_epoch( def ensure_fresh_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None) -> int:
shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None
) -> int:
# ensure new fresh epoch to avoid epoch switch during test session # ensure new fresh epoch to avoid epoch switch during test session
alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
current_epoch = get_epoch(shell, cluster, alive_node) current_epoch = get_epoch(shell, cluster, alive_node)
@@ -54,7 +51,7 @@ def ensure_fresh_epoch(
return epoch return epoch
@reporter.step_deco("Wait up to {timeout} seconds for nodes on cluster to align epochs") @reporter.step("Wait up to {timeout} seconds for nodes on cluster to align epochs")
def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60): def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60):
@wait_for_success(timeout, 5, None, True) @wait_for_success(timeout, 5, None, True)
def check_epochs(): def check_epochs():
@@ -64,7 +61,7 @@ def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60):
check_epochs() check_epochs()
@reporter.step_deco("Get Epoch") @reporter.step("Get Epoch")
def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None):
alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
endpoint = alive_node.get_rpc_endpoint() endpoint = alive_node.get_rpc_endpoint()
@@ -77,7 +74,7 @@ def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode]
return int(epoch.stdout) return int(epoch.stdout)
@reporter.step_deco("Tick Epoch") @reporter.step("Tick Epoch")
def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None):
""" """
Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv) Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv)

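wait_for_epochs_align above decorates an inner check with @wait_for_success(timeout, 5, None, True) and calls it once, leaving the polling to the decorator. An illustrative re-implementation of that retry pattern (not the real frostfs_testlib.testing.test_control decorator; its extra arguments are omitted here):

import time


def wait_for_success(max_wait_time: int, interval: int):
    def decorator(func):
        def wrapper(*args, **kwargs):
            deadline = time.monotonic() + max_wait_time
            while True:
                try:
                    return func(*args, **kwargs)
                except Exception:
                    if time.monotonic() >= deadline:
                        raise  # out of time: surface the last failure
                    time.sleep(interval)

        return wrapper

    return decorator


@wait_for_success(60, 5)
def check_epochs():
    epochs_by_node = {"node1": 10, "node2": 10}  # stand-in for real per-node epochs
    assert len(set(epochs_by_node.values())) == 1, f"Unaligned epochs: {epochs_by_node}"


check_epochs()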

@@ -10,7 +10,7 @@ from urllib.parse import quote_plus
import requests import requests
from frostfs_testlib.reporter import get_reporter from frostfs_testlib import reporter
from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE
from frostfs_testlib.s3.aws_cli_client import command_options from frostfs_testlib.s3.aws_cli_client import command_options
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
@@ -21,15 +21,13 @@ from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.testing.test_control import retry from frostfs_testlib.testing.test_control import retry
from frostfs_testlib.utils.file_utils import get_file_hash from frostfs_testlib.utils.file_utils import get_file_hash
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/") ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
local_shell = LocalShell() local_shell = LocalShell()
@reporter.step_deco("Get via HTTP Gate") @reporter.step("Get via HTTP Gate")
def get_via_http_gate( def get_via_http_gate(
cid: str, cid: str,
oid: str, oid: str,
@@ -53,9 +51,7 @@ def get_via_http_gate(
else: else:
request = f"{endpoint}{request_path}" request = f"{endpoint}{request_path}"
resp = requests.get( resp = requests.get(request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False)
request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False
)
if not resp.ok: if not resp.ok:
raise Exception( raise Exception(
@@ -75,10 +71,8 @@ def get_via_http_gate(
return file_path return file_path
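get_via_http_gate above requests the object with an explicit Host header and streams the body to disk. A condensed sketch of that flow; the /get/{cid}/{oid} request path and the chunk size are assumptions, since the path construction sits outside the visible hunks:

import requests


def download_via_gate(cid: str, oid: str, endpoint: str, http_hostname: str, dest: str) -> str:
    request = f"{endpoint}/get/{cid}/{oid}"  # assumed request path
    resp = requests.get(request, headers={"Host": http_hostname}, stream=True, timeout=300, verify=False)
    if not resp.ok:
        raise Exception(f"Gate returned {resp.status_code}: {resp.reason}")
    with open(dest, "wb") as out:
        for chunk in resp.iter_content(chunk_size=8192):  # stream instead of buffering the whole object
            out.write(chunk)
    return dest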
@reporter.step_deco("Get via Zip HTTP Gate") @reporter.step("Get via Zip HTTP Gate")
def get_via_zip_http_gate( def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300):
cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300
):
""" """
This function gets the given object from the HTTP gate This function gets the given object from the HTTP gate
cid: container id to get object from cid: container id to get object from
@@ -111,7 +105,7 @@ def get_via_zip_http_gate(
return os.path.join(os.getcwd(), ASSETS_DIR, prefix) return os.path.join(os.getcwd(), ASSETS_DIR, prefix)
@reporter.step_deco("Get via HTTP Gate by attribute") @reporter.step("Get via HTTP Gate by attribute")
def get_via_http_gate_by_attribute( def get_via_http_gate_by_attribute(
cid: str, cid: str,
attribute: dict, attribute: dict,
@@ -136,9 +130,7 @@ def get_via_http_gate_by_attribute(
else: else:
request = f"{endpoint}{request_path}" request = f"{endpoint}{request_path}"
resp = requests.get( resp = requests.get(request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname})
request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname}
)
if not resp.ok: if not resp.ok:
raise Exception( raise Exception(
@@ -159,7 +151,7 @@ def get_via_http_gate_by_attribute(
# TODO: pass http_hostname as a header # TODO: pass http_hostname as a header
@reporter.step_deco("Upload via HTTP Gate") @reporter.step("Upload via HTTP Gate")
def upload_via_http_gate( def upload_via_http_gate(
cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300 cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300
) -> str: ) -> str:
@@ -173,9 +165,7 @@ def upload_via_http_gate(
request = f"{endpoint}/upload/{cid}" request = f"{endpoint}/upload/{cid}"
files = {"upload_file": open(path, "rb")} files = {"upload_file": open(path, "rb")}
body = {"filename": path} body = {"filename": path}
resp = requests.post( resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout, verify=False)
request, files=files, data=body, headers=headers, timeout=timeout, verify=False
)
if not resp.ok: if not resp.ok:
raise Exception( raise Exception(
@@ -193,7 +183,7 @@ def upload_via_http_gate(
return resp.json().get("object_id") return resp.json().get("object_id")
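The upload path above posts the file as a multipart form and reads the new object ID from the JSON response. The same flow in isolation, with one deliberate difference: the file handle is opened in a context manager rather than left to the garbage collector as in the hunk above:

from typing import Optional

import requests


def upload_via_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None) -> str:
    request = f"{endpoint}/upload/{cid}"
    with open(path, "rb") as upload_file:  # closed deterministically
        resp = requests.post(
            request,
            files={"upload_file": upload_file},
            data={"filename": path},
            headers=headers,
            timeout=300,
            verify=False,
        )
    if not resp.ok:
        raise Exception(f"Gate returned {resp.status_code}: {resp.reason}")
    return resp.json().get("object_id")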
@reporter.step_deco("Check is the passed object large") @reporter.step("Check is the passed object large")
def is_object_large(filepath: str) -> bool: def is_object_large(filepath: str) -> bool:
""" """
This function checks the passed file size and returns True if file_size > SIMPLE_OBJECT_SIZE This function checks the passed file size and returns True if file_size > SIMPLE_OBJECT_SIZE
@@ -208,7 +198,7 @@ def is_object_large(filepath: str) -> bool:
# TODO: pass http_hostname as a header # TODO: pass http_hostname as a header
@reporter.step_deco("Upload via HTTP Gate using Curl") @reporter.step("Upload via HTTP Gate using Curl")
def upload_via_http_gate_curl( def upload_via_http_gate_curl(
cid: str, cid: str,
filepath: str, filepath: str,
@@ -256,7 +246,7 @@ def upload_via_http_gate_curl(
@retry(max_attempts=3, sleep_interval=1) @retry(max_attempts=3, sleep_interval=1)
@reporter.step_deco("Get via HTTP Gate using Curl") @reporter.step("Get via HTTP Gate using Curl")
def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> str: def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> str:
""" """
This function gets the given object from the HTTP gate using the curl utility. This function gets the given object from the HTTP gate using the curl utility.
@ -280,7 +270,7 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"):
reporter.attach(command_attachment, f"{req_type} Request") reporter.attach(command_attachment, f"{req_type} Request")
@reporter.step_deco("Try to get object and expect error") @reporter.step("Try to get object and expect error")
def try_to_get_object_and_expect_error( def try_to_get_object_and_expect_error(
cid: str, cid: str,
oid: str, oid: str,
@ -296,7 +286,7 @@ def try_to_get_object_and_expect_error(
assert match, f"Expected {err} to match {error_pattern}" assert match, f"Expected {err} to match {error_pattern}"
@reporter.step_deco("Verify object can be get using HTTP header attribute") @reporter.step("Verify object can be get using HTTP header attribute")
def get_object_by_attr_and_verify_hashes( def get_object_by_attr_and_verify_hashes(
oid: str, oid: str,
file_name: str, file_name: str,
@ -305,9 +295,7 @@ def get_object_by_attr_and_verify_hashes(
endpoint: str, endpoint: str,
http_hostname: str, http_hostname: str,
) -> None: ) -> None:
got_file_path_http = get_via_http_gate( got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname)
cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname
)
got_file_path_http_attr = get_via_http_gate_by_attribute( got_file_path_http_attr = get_via_http_gate_by_attribute(
cid=cid, attribute=attrs, endpoint=endpoint, http_hostname=http_hostname cid=cid, attribute=attrs, endpoint=endpoint, http_hostname=http_hostname
) )
@ -348,9 +336,7 @@ def verify_object_hash(
shell=shell, shell=shell,
endpoint=random_node.get_rpc_endpoint(), endpoint=random_node.get_rpc_endpoint(),
) )
got_file_path_http = object_getter( got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname)
cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname
)
assert_hashes_are_equal(file_name, got_file_path, got_file_path_http) assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)
@ -359,18 +345,14 @@ def assert_hashes_are_equal(orig_file_name: str, got_file_1: str, got_file_2: st
msg = "Expected hashes are equal for files {f1} and {f2}" msg = "Expected hashes are equal for files {f1} and {f2}"
got_file_hash_http = get_file_hash(got_file_1) got_file_hash_http = get_file_hash(got_file_1)
assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1) assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1)
assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format( assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(f1=orig_file_name, f2=got_file_1)
f1=orig_file_name, f2=got_file_1
)
def attr_into_header(attrs: dict) -> dict: def attr_into_header(attrs: dict) -> dict:
return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()} return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()}
@reporter.step_deco( @reporter.step("Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'")
"Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'"
)
def attr_into_str_header_curl(attrs: dict) -> list: def attr_into_str_header_curl(attrs: dict) -> list:
headers = [] headers = []
for k, v in attrs.items(): for k, v in attrs.items():
@ -379,9 +361,7 @@ def attr_into_str_header_curl(attrs: dict) -> list:
return headers return headers
@reporter.step_deco( @reporter.step("Try to get object via http (pass http_request and optional attributes) and expect error")
"Try to get object via http (pass http_request and optional attributes) and expect error"
)
def try_to_get_object_via_passed_request_and_expect_error( def try_to_get_object_via_passed_request_and_expect_error(
cid: str, cid: str,
oid: str, oid: str,
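The whole file follows the same migration: the module-level singleton obtained via get_reporter() gives way to importing the reporter module directly, and step titles move from step_deco to step. A minimal sketch of the new style, assuming only the import path and decorator visible in this diff; download_object and its endpoint layout are hypothetical:

from frostfs_testlib import reporter


@reporter.step("Get object {oid} via HTTP gate")
def download_object(oid: str) -> str:
    # Hypothetical endpoint layout, for illustration only.
    return f"http://gate.local/get/{oid}"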
View file
@ -1,9 +1,7 @@
from frostfs_testlib.reporter import get_reporter from frostfs_testlib import reporter
from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.testing.test_control import retry from frostfs_testlib.testing.test_control import retry
reporter = get_reporter()
class IpTablesHelper: class IpTablesHelper:
@staticmethod @staticmethod
@ -21,11 +19,7 @@ class IpTablesHelper:
@staticmethod @staticmethod
def restore_input_traffic_to_port(node: ClusterNode) -> None: def restore_input_traffic_to_port(node: ClusterNode) -> None:
shell = node.host.get_shell() shell = node.host.get_shell()
ports = ( ports = shell.exec("iptables -L --numeric | grep DROP | awk '{print $7}'").stdout.strip().split("\n")
shell.exec("iptables -L --numeric | grep DROP | awk '{print $7}'")
.stdout.strip()
.split("\n")
)
if ports[0] == "": if ports[0] == "":
return return
for port in ports: for port in ports:
@ -34,11 +28,7 @@ class IpTablesHelper:
@staticmethod @staticmethod
def restore_input_traffic_to_node(node: ClusterNode) -> None: def restore_input_traffic_to_node(node: ClusterNode) -> None:
shell = node.host.get_shell() shell = node.host.get_shell()
unlock_ip = ( unlock_ip = shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'").stdout.strip().split("\n")
shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'")
.stdout.strip()
.split("\n")
)
if unlock_ip[0] == "": if unlock_ip[0] == "":
return return
for ip in unlock_ip: for ip in unlock_ip:
@ -47,17 +37,17 @@ class IpTablesHelper:
# TODO Move class to HOST # TODO Move class to HOST
class IfUpDownHelper: class IfUpDownHelper:
@reporter.step_deco("Down {interface} to {node}") @reporter.step("Down {interface} to {node}")
def down_interface(self, node: ClusterNode, interface: str) -> None: def down_interface(self, node: ClusterNode, interface: str) -> None:
shell = node.host.get_shell() shell = node.host.get_shell()
shell.exec(f"ifdown {interface}") shell.exec(f"ifdown {interface}")
@reporter.step_deco("Up {interface} to {node}") @reporter.step("Up {interface} to {node}")
def up_interface(self, node: ClusterNode, interface: str) -> None: def up_interface(self, node: ClusterNode, interface: str) -> None:
shell = node.host.get_shell() shell = node.host.get_shell()
shell.exec(f"ifup {interface}") shell.exec(f"ifup {interface}")
@reporter.step_deco("Up all interface to {node}") @reporter.step("Up all interface to {node}")
def up_all_interface(self, node: ClusterNode) -> None: def up_all_interface(self, node: ClusterNode) -> None:
shell = node.host.get_shell() shell = node.host.get_shell()
interfaces = list(node.host.config.interfaces.keys()) interfaces = list(node.host.config.interfaces.keys())
@ -65,7 +55,7 @@ class IfUpDownHelper:
for name_interface in interfaces: for name_interface in interfaces:
self.check_state_up(node, name_interface) self.check_state_up(node, name_interface)
@reporter.step_deco("Down all interface to {node}") @reporter.step("Down all interface to {node}")
def down_all_interface(self, node: ClusterNode) -> None: def down_all_interface(self, node: ClusterNode) -> None:
shell = node.host.get_shell() shell = node.host.get_shell()
interfaces = list(node.host.config.interfaces.keys()) interfaces = list(node.host.config.interfaces.keys())
@ -73,12 +63,10 @@ class IfUpDownHelper:
for name_interface in interfaces: for name_interface in interfaces:
self.check_state_down(node, name_interface) self.check_state_down(node, name_interface)
@reporter.step_deco("Check {node} to {interface}") @reporter.step("Check {node} to {interface}")
def check_state(self, node: ClusterNode, interface: str) -> str: def check_state(self, node: ClusterNode, interface: str) -> str:
shell = node.host.get_shell() shell = node.host.get_shell()
return shell.exec( return shell.exec(f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'").stdout.strip()
f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'"
).stdout.strip()
@retry(max_attempts=5, sleep_interval=5, expected_result="UP") @retry(max_attempts=5, sleep_interval=5, expected_result="UP")
def check_state_up(self, node: ClusterNode, interface: str) -> str: def check_state_up(self, node: ClusterNode, interface: str) -> str:
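check_state above is polled through @retry with expected_result="UP" until the interface reports the desired state. An illustrative sketch of such a decorator, assuming simple equality semantics for expected_result (the actual frostfs_testlib implementation may differ):

import time
from functools import wraps


def retry(max_attempts: int, sleep_interval: float, expected_result=None):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            last = None
            for _ in range(max_attempts):
                last = func(*args, **kwargs)
                if expected_result is None or last == expected_result:
                    return last
                time.sleep(sleep_interval)
            raise AssertionError(f"Expected {expected_result!r}, got {last!r}")

        return wrapper

    return decorator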
View file
@ -6,13 +6,9 @@ from dataclasses import dataclass
from time import sleep from time import sleep
from typing import Optional from typing import Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC
from frostfs_testlib.resources.cli import (
FROSTFS_ADM_CONFIG_PATH,
FROSTFS_ADM_EXEC,
FROSTFS_CLI_EXEC,
)
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align
@ -20,7 +16,6 @@ from frostfs_testlib.storage.cluster import Cluster, StorageNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
from frostfs_testlib.utils import datetime_utils from frostfs_testlib.utils import datetime_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@ -40,7 +35,7 @@ class HealthStatus:
return HealthStatus(network, health) return HealthStatus(network, health)
@reporter.step_deco("Get Locode from random storage node") @reporter.step("Get Locode from random storage node")
def get_locode_from_random_node(cluster: Cluster) -> str: def get_locode_from_random_node(cluster: Cluster) -> str:
node = random.choice(cluster.services(StorageNode)) node = random.choice(cluster.services(StorageNode))
locode = node.get_un_locode() locode = node.get_un_locode()
@ -48,7 +43,7 @@ def get_locode_from_random_node(cluster: Cluster) -> str:
return locode return locode
@reporter.step_deco("Healthcheck for storage node {node}") @reporter.step("Healthcheck for storage node {node}")
def storage_node_healthcheck(node: StorageNode) -> HealthStatus: def storage_node_healthcheck(node: StorageNode) -> HealthStatus:
""" """
The function returns storage node's health status. The function returns storage node's health status.
@ -62,7 +57,7 @@ def storage_node_healthcheck(node: StorageNode) -> HealthStatus:
return HealthStatus.from_stdout(output) return HealthStatus.from_stdout(output)
@reporter.step_deco("Set status for {node}") @reporter.step("Set status for {node}")
def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None: def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None:
""" """
The function sets a particular status for the given node. The function sets a particular status for the given node.
@ -75,7 +70,7 @@ def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) ->
_run_control_command_with_retries(node, command, retries) _run_control_command_with_retries(node, command, retries)
@reporter.step_deco("Get netmap snapshot") @reporter.step("Get netmap snapshot")
def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
""" """
The function returns a string representation of the netmap snapshot. The function returns a string representation of the netmap snapshot.
@ -95,7 +90,7 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
).stdout ).stdout
@reporter.step_deco("Get shard list for {node}") @reporter.step("Get shard list for {node}")
def node_shard_list(node: StorageNode) -> list[str]: def node_shard_list(node: StorageNode) -> list[str]:
""" """
The function returns the list of shards for the specified storage node. The function returns the list of shards for the specified storage node.
@ -109,7 +104,7 @@ def node_shard_list(node: StorageNode) -> list[str]:
return re.findall(r"Shard (.*):", output) return re.findall(r"Shard (.*):", output)
@reporter.step_deco("Shard set for {node}") @reporter.step("Shard set for {node}")
def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str: def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str:
""" """
The function sets the mode for the specified shard. The function sets the mode for the specified shard.
@ -120,7 +115,7 @@ def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str:
return _run_control_command_with_retries(node, command) return _run_control_command_with_retries(node, command)
@reporter.step_deco("Drop object from {node}") @reporter.step("Drop object from {node}")
def drop_object(node: StorageNode, cid: str, oid: str) -> str: def drop_object(node: StorageNode, cid: str, oid: str) -> str:
""" """
The function drops the object from the specified node. The function drops the object from the specified node.
@ -131,14 +126,14 @@ def drop_object(node: StorageNode, cid: str, oid: str) -> str:
return _run_control_command_with_retries(node, command) return _run_control_command_with_retries(node, command)
@reporter.step_deco("Delete data from host for node {node}") @reporter.step("Delete data from host for node {node}")
def delete_node_data(node: StorageNode) -> None: def delete_node_data(node: StorageNode) -> None:
node.stop_service() node.stop_service()
node.host.delete_storage_node_data(node.name) node.host.delete_storage_node_data(node.name)
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
@reporter.step_deco("Exclude node {node_to_exclude} from network map") @reporter.step("Exclude node {node_to_exclude} from network map")
def exclude_node_from_network_map( def exclude_node_from_network_map(
node_to_exclude: StorageNode, node_to_exclude: StorageNode,
alive_node: StorageNode, alive_node: StorageNode,
@ -154,12 +149,10 @@ def exclude_node_from_network_map(
wait_for_epochs_align(shell, cluster) wait_for_epochs_align(shell, cluster)
snapshot = get_netmap_snapshot(node=alive_node, shell=shell) snapshot = get_netmap_snapshot(node=alive_node, shell=shell)
assert ( assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be absent in network map"
node_netmap_key not in snapshot
), f"Expected node with key {node_netmap_key} to be absent in network map"
@reporter.step_deco("Include node {node_to_include} into network map") @reporter.step("Include node {node_to_include} into network map")
def include_node_to_network_map( def include_node_to_network_map(
node_to_include: StorageNode, node_to_include: StorageNode,
alive_node: StorageNode, alive_node: StorageNode,
@ -178,37 +171,29 @@ def include_node_to_network_map(
check_node_in_map(node_to_include, shell, alive_node) check_node_in_map(node_to_include, shell, alive_node)
@reporter.step_deco("Check node {node} in network map") @reporter.step("Check node {node} in network map")
def check_node_in_map( def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None
) -> None:
alive_node = alive_node or node alive_node = alive_node or node
node_netmap_key = node.get_wallet_public_key() node_netmap_key = node.get_wallet_public_key()
logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}") logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}")
snapshot = get_netmap_snapshot(alive_node, shell) snapshot = get_netmap_snapshot(alive_node, shell)
assert ( assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map"
node_netmap_key in snapshot
), f"Expected node with key {node_netmap_key} to be in network map"
@reporter.step_deco("Check node {node} NOT in network map") @reporter.step("Check node {node} NOT in network map")
def check_node_not_in_map( def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None
) -> None:
alive_node = alive_node or node alive_node = alive_node or node
node_netmap_key = node.get_wallet_public_key() node_netmap_key = node.get_wallet_public_key()
logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}") logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}")
snapshot = get_netmap_snapshot(alive_node, shell) snapshot = get_netmap_snapshot(alive_node, shell)
assert ( assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be NOT in network map"
node_netmap_key not in snapshot
), f"Expected node with key {node_netmap_key} to be NOT in network map"
@reporter.step_deco("Wait for node {node} is ready") @reporter.step("Wait for node {node} is ready")
def wait_for_node_to_be_ready(node: StorageNode) -> None: def wait_for_node_to_be_ready(node: StorageNode) -> None:
timeout, attempts = 30, 6 timeout, attempts = 30, 6
for _ in range(attempts): for _ in range(attempts):
@ -219,12 +204,10 @@ def wait_for_node_to_be_ready(node: StorageNode) -> None:
except Exception as err: except Exception as err:
logger.warning(f"Node {node} is not ready:\n{err}") logger.warning(f"Node {node} is not ready:\n{err}")
sleep(timeout) sleep(timeout)
raise AssertionError( raise AssertionError(f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds")
f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds"
)
@reporter.step_deco("Remove nodes from network map trough cli-adm morph command") @reporter.step("Remove nodes from network map trough cli-adm morph command")
def remove_nodes_from_map_morph( def remove_nodes_from_map_morph(
shell: Shell, shell: Shell,
cluster: Cluster, cluster: Cluster,
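Taken together, the helpers in this file support a demote-and-verify flow: set the node offline, advance the epoch, then confirm it left the map. A hedged usage sketch (tick_epoch's exact signature is an assumption; the other calls follow the signatures visible above):

from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align
from frostfs_testlib.steps.node_management import check_node_not_in_map, storage_node_set_status


def demote_node(node, alive_node, shell, cluster):
    # Take the node offline, advance the epoch, then expect it to leave the map.
    storage_node_set_status(node, status="offline")
    tick_epoch(shell, cluster)
    wait_for_epochs_align(shell, cluster)
    check_node_not_in_map(node, shell, alive_node)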
View file
@ -8,21 +8,21 @@ from typing import Optional
from neo3.wallet import utils as neo3_utils from neo3.wallet import utils as neo3_utils
from neo3.wallet import wallet as neo3_wallet from neo3.wallet import wallet as neo3_wallet
from frostfs_testlib import reporter
from frostfs_testlib.cli import NeoGo from frostfs_testlib.cli import NeoGo
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE
from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain
from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
EMPTY_PASSWORD = "" EMPTY_PASSWORD = ""
TX_PERSIST_TIMEOUT = 15 # seconds TX_PERSIST_TIMEOUT = 15 # seconds
ASSET_POWER_SIDECHAIN = 10**12 ASSET_POWER_SIDECHAIN = 10**12
def get_nns_contract_hash(morph_chain: MorphChain) -> str: def get_nns_contract_hash(morph_chain: MorphChain) -> str:
return morph_chain.rpc_client.get_contract_state(1)["hash"] return morph_chain.rpc_client.get_contract_state(1)["hash"]
@ -39,6 +39,7 @@ def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell)
stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"] stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"]
return bytes.decode(base64.b64decode(stack_data[0]["value"])) return bytes.decode(base64.b64decode(stack_data[0]["value"]))
def transaction_accepted(morph_chain: MorphChain, tx_id: str): def transaction_accepted(morph_chain: MorphChain, tx_id: str):
""" """
This function returns True if the TX was accepted. This function returns True if the TX was accepted.
@ -62,7 +63,7 @@ def transaction_accepted(morph_chain: MorphChain, tx_id: str):
return False return False
@reporter.step_deco("Get FrostFS Balance") @reporter.step("Get FrostFS Balance")
def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""): def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""):
""" """
This function returns the FrostFS balance for the given wallet. This function returns the FrostFS balance for the given wallet.
@ -82,7 +83,8 @@ def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_
logger.error(f"failed to get wallet balance: {out}") logger.error(f"failed to get wallet balance: {out}")
raise out raise out
@reporter.step_deco("Transfer Gas")
@reporter.step("Transfer Gas")
def transfer_gas( def transfer_gas(
shell: Shell, shell: Shell,
amount: int, amount: int,
@ -111,16 +113,10 @@ def transfer_gas(
""" """
wallet_from_path = wallet_from_path or morph_chain.get_wallet_path() wallet_from_path = wallet_from_path or morph_chain.get_wallet_path()
wallet_from_password = ( wallet_from_password = (
wallet_from_password wallet_from_password if wallet_from_password is not None else morph_chain.get_wallet_password()
if wallet_from_password is not None
else morph_chain.get_wallet_password()
)
address_from = address_from or wallet_utils.get_last_address_from_wallet(
wallet_from_path, wallet_from_password
)
address_to = address_to or wallet_utils.get_last_address_from_wallet(
wallet_to_path, wallet_to_password
) )
address_from = address_from or wallet_utils.get_last_address_from_wallet(wallet_from_path, wallet_from_password)
address_to = address_to or wallet_utils.get_last_address_from_wallet(wallet_to_path, wallet_to_password)
neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE)
out = neogo.nep17.transfer( out = neogo.nep17.transfer(
@ -141,7 +137,7 @@ def transfer_gas(
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
@reporter.step_deco("Get Sidechain Balance") @reporter.step("Get Sidechain Balance")
def get_sidechain_balance(morph_chain: MorphChain, address: str): def get_sidechain_balance(morph_chain: MorphChain, address: str):
resp = morph_chain.rpc_client.get_nep17_balances(address=address) resp = morph_chain.rpc_client.get_nep17_balances(address=address)
logger.info(f"Got getnep17balances response: {resp}") logger.info(f"Got getnep17balances response: {resp}")
View file
@ -8,27 +8,23 @@ from typing import Optional
from dateutil.parser import parse from dateutil.parser import parse
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAuthmate from frostfs_testlib.cli import FrostfsAuthmate
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.shell import CommandOptions, InteractiveInput, Shell from frostfs_testlib.shell import CommandOptions, InteractiveInput, Shell
from frostfs_testlib.shell.interfaces import SshCredentials from frostfs_testlib.shell.interfaces import SshCredentials
from frostfs_testlib.steps.cli.container import ( from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container
search_container_by_name,
search_nodes_with_container,
)
from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.utils.cli_utils import _run_with_passwd from frostfs_testlib.utils.cli_utils import _run_with_passwd
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@reporter.step_deco("Expected all objects are presented in the bucket") @reporter.step("Expected all objects are presented in the bucket")
def check_objects_in_bucket( def check_objects_in_bucket(
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
bucket: str, bucket: str,
@ -37,13 +33,9 @@ def check_objects_in_bucket(
) -> None: ) -> None:
unexpected_objects = unexpected_objects or [] unexpected_objects = unexpected_objects or []
bucket_objects = s3_client.list_objects(bucket) bucket_objects = s3_client.list_objects(bucket)
assert len(bucket_objects) == len( assert len(bucket_objects) == len(expected_objects), f"Expected {len(expected_objects)} objects in the bucket"
expected_objects
), f"Expected {len(expected_objects)} objects in the bucket"
for bucket_object in expected_objects: for bucket_object in expected_objects:
assert ( assert bucket_object in bucket_objects, f"Expected object {bucket_object} in objects list {bucket_objects}"
bucket_object in bucket_objects
), f"Expected object {bucket_object} in objects list {bucket_objects}"
for bucket_object in unexpected_objects: for bucket_object in unexpected_objects:
assert ( assert (
@ -51,21 +43,17 @@ def check_objects_in_bucket(
), f"Expected object {bucket_object} not in objects list {bucket_objects}" ), f"Expected object {bucket_object} not in objects list {bucket_objects}"
@reporter.step_deco("Try to get object and got error") @reporter.step("Try to get object and got error")
def try_to_get_objects_and_expect_error( def try_to_get_objects_and_expect_error(s3_client: S3ClientWrapper, bucket: str, object_keys: list) -> None:
s3_client: S3ClientWrapper, bucket: str, object_keys: list
) -> None:
for obj in object_keys: for obj in object_keys:
try: try:
s3_client.get_object(bucket, obj) s3_client.get_object(bucket, obj)
raise AssertionError(f"Object {obj} found in bucket {bucket}") raise AssertionError(f"Object {obj} found in bucket {bucket}")
except Exception as err: except Exception as err:
assert "The specified key does not exist" in str( assert "The specified key does not exist" in str(err), f"Expected error in exception {err}"
err
), f"Expected error in exception {err}"
@reporter.step_deco("Set versioning status to '{status}' for bucket '{bucket}'") @reporter.step("Set versioning status to '{status}' for bucket '{bucket}'")
def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus): def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus):
if status == VersioningStatus.UNDEFINED: if status == VersioningStatus.UNDEFINED:
return return
@ -83,12 +71,8 @@ def object_key_from_file_path(full_path: str) -> str:
def assert_tags( def assert_tags(
actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None
) -> None: ) -> None:
expected_tags = ( expected_tags = [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else []
[{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else [] unexpected_tags = [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else []
)
unexpected_tags = (
[{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else []
)
if expected_tags == []: if expected_tags == []:
assert not actual_tags, f"Expected there is no tags, got {actual_tags}" assert not actual_tags, f"Expected there is no tags, got {actual_tags}"
assert len(expected_tags) == len(actual_tags) assert len(expected_tags) == len(actual_tags)
@ -98,7 +82,7 @@ def assert_tags(
assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}" assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}"
@reporter.step_deco("Expected all tags are presented in object") @reporter.step("Expected all tags are presented in object")
def check_tags_by_object( def check_tags_by_object(
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
bucket: str, bucket: str,
@ -107,12 +91,10 @@ def check_tags_by_object(
unexpected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None,
) -> None: ) -> None:
actual_tags = s3_client.get_object_tagging(bucket, key) actual_tags = s3_client.get_object_tagging(bucket, key)
assert_tags( assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags)
expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
)
@reporter.step_deco("Expected all tags are presented in bucket") @reporter.step("Expected all tags are presented in bucket")
def check_tags_by_bucket( def check_tags_by_bucket(
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
bucket: str, bucket: str,
@ -120,9 +102,7 @@ def check_tags_by_bucket(
unexpected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None,
) -> None: ) -> None:
actual_tags = s3_client.get_bucket_tagging(bucket) actual_tags = s3_client.get_bucket_tagging(bucket)
assert_tags( assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags)
expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
)
def assert_object_lock_mode( def assert_object_lock_mode(
@ -135,25 +115,19 @@ def assert_object_lock_mode(
retain_period: Optional[int] = None, retain_period: Optional[int] = None,
): ):
object_dict = s3_client.get_object(bucket, file_name, full_output=True) object_dict = s3_client.get_object(bucket, file_name, full_output=True)
assert ( assert object_dict.get("ObjectLockMode") == object_lock_mode, f"Expected Object Lock Mode is {object_lock_mode}"
object_dict.get("ObjectLockMode") == object_lock_mode
), f"Expected Object Lock Mode is {object_lock_mode}"
assert ( assert (
object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status
), f"Expected Object Lock Legal Hold Status is {legal_hold_status}" ), f"Expected Object Lock Legal Hold Status is {legal_hold_status}"
object_retain_date = object_dict.get("ObjectLockRetainUntilDate") object_retain_date = object_dict.get("ObjectLockRetainUntilDate")
retain_date = ( retain_date = parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date
parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date
)
if retain_until_date: if retain_until_date:
assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_until_date.strftime( assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_until_date.strftime(
"%Y-%m-%dT%H:%M:%S" "%Y-%m-%dT%H:%M:%S"
), f'Expected Object Lock Retain Until Date is {str(retain_until_date.strftime("%Y-%m-%dT%H:%M:%S"))}' ), f'Expected Object Lock Retain Until Date is {str(retain_until_date.strftime("%Y-%m-%dT%H:%M:%S"))}'
elif retain_period: elif retain_period:
last_modify_date = object_dict.get("LastModified") last_modify_date = object_dict.get("LastModified")
last_modify = ( last_modify = parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date
parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date
)
assert ( assert (
retain_date - last_modify + timedelta(seconds=1) retain_date - last_modify + timedelta(seconds=1)
).days == retain_period, f"Expected retention period is {retain_period} days" ).days == retain_period, f"Expected retention period is {retain_period} days"
@ -187,7 +161,7 @@ def assert_s3_acl(acl_grants: list, permitted_users: str):
logger.error("FULL_CONTROL is given to All Users") logger.error("FULL_CONTROL is given to All Users")
@reporter.step_deco("Init S3 Credentials") @reporter.step("Init S3 Credentials")
def init_s3_credentials( def init_s3_credentials(
wallet: WalletInfo, wallet: WalletInfo,
shell: Shell, shell: Shell,
@ -213,24 +187,18 @@ def init_s3_credentials(
container_placement_policy=container_placement_policy, container_placement_policy=container_placement_policy,
).stdout ).stdout
aws_access_key_id = str( aws_access_key_id = str(
re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group( re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id")
"aws_access_key_id"
)
) )
aws_secret_access_key = str( aws_secret_access_key = str(
re.search( re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group(
r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output "aws_secret_access_key"
).group("aws_secret_access_key")
)
cid = str(
re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group(
"container_id"
) )
) )
cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group("container_id"))
return cid, aws_access_key_id, aws_secret_access_key return cid, aws_access_key_id, aws_secret_access_key
@reporter.step_deco("Delete bucket with all objects") @reporter.step("Delete bucket with all objects")
def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str):
versioning_status = s3_client.get_bucket_versioning_status(bucket) versioning_status = s3_client.get_bucket_versioning_status(bucket)
if versioning_status == VersioningStatus.ENABLED.value: if versioning_status == VersioningStatus.ENABLED.value:
@ -255,7 +223,7 @@ def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str):
s3_client.delete_bucket(bucket) s3_client.delete_bucket(bucket)
@reporter.step_deco("Search nodes bucket") @reporter.step("Search nodes bucket")
def search_nodes_with_bucket( def search_nodes_with_bucket(
cluster: Cluster, cluster: Cluster,
bucket_name: str, bucket_name: str,
@ -264,7 +232,5 @@ def search_nodes_with_bucket(
endpoint: str, endpoint: str,
) -> list[ClusterNode]: ) -> list[ClusterNode]:
cid = search_container_by_name(wallet=wallet, name=bucket_name, shell=shell, endpoint=endpoint) cid = search_container_by_name(wallet=wallet, name=bucket_name, shell=shell, endpoint=endpoint)
nodes_list = search_nodes_with_container( nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster)
wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster
)
return nodes_list return nodes_list
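init_s3_credentials above extracts credentials from the authmate output with named-group regexes. A self-contained sketch against a fabricated sample (the output format is inferred from the regexes in this file, not from real gate output):

import re

sample = 'access_key_id: "AKIDEXAMPLE"\nsecret_access_key: "SECRETEXAMPLE"\ncontainer_id: "CIDEXAMPLE"\n'

access_key = re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", sample).group("aws_access_key_id")
secret_key = re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", sample).group("aws_secret_access_key")
cid = re.search(r"container_id.*:\s.(?P<container_id>\w*)", sample).group("container_id")
print(access_key, secret_key, cid)  # AKIDEXAMPLE SECRETEXAMPLE CIDEXAMPLE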
View file
@ -7,8 +7,8 @@ from dataclasses import dataclass
from enum import Enum from enum import Enum
from typing import Any, Optional from typing import Any, Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
@ -17,7 +17,6 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.readable import HumanReadableEnum from frostfs_testlib.testing.readable import HumanReadableEnum
from frostfs_testlib.utils import json_utils, wallet_utils from frostfs_testlib.utils import json_utils, wallet_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
UNRELATED_KEY = "unrelated key in the session" UNRELATED_KEY = "unrelated key in the session"
@ -50,7 +49,7 @@ class Lifetime:
iat: int = 0 iat: int = 0
@reporter.step_deco("Generate Session Token") @reporter.step("Generate Session Token")
def generate_session_token( def generate_session_token(
owner_wallet: WalletInfo, owner_wallet: WalletInfo,
session_wallet: WalletInfo, session_wallet: WalletInfo,
@ -72,9 +71,7 @@ def generate_session_token(
file_path = os.path.join(tokens_dir, str(uuid.uuid4())) file_path = os.path.join(tokens_dir, str(uuid.uuid4()))
pub_key_64 = wallet_utils.get_wallet_public_key( pub_key_64 = wallet_utils.get_wallet_public_key(session_wallet.path, session_wallet.password, "base64")
session_wallet.path, session_wallet.password, "base64"
)
lifetime = lifetime or Lifetime() lifetime = lifetime or Lifetime()
@ -99,7 +96,7 @@ def generate_session_token(
return file_path return file_path
@reporter.step_deco("Generate Session Token For Container") @reporter.step("Generate Session Token For Container")
def generate_container_session_token( def generate_container_session_token(
owner_wallet: WalletInfo, owner_wallet: WalletInfo,
session_wallet: WalletInfo, session_wallet: WalletInfo,
@ -126,11 +123,7 @@ def generate_container_session_token(
"container": { "container": {
"verb": verb.value, "verb": verb.value,
"wildcard": cid is None, "wildcard": cid is None,
**( **({"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}} if cid is not None else {}),
{"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}}
if cid is not None
else {}
),
}, },
} }
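The conditional **-spread in the hunk above includes the containerID key only when a specific container is pinned; with cid left as None the token stays wildcard. A standalone illustration with placeholder values:

cid = None
session_body = {
    "verb": "PUT",
    "wildcard": cid is None,
    **({"containerID": {"value": cid}} if cid is not None else {}),
}
print(session_body)  # {'verb': 'PUT', 'wildcard': True}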
@ -143,7 +136,7 @@ def generate_container_session_token(
) )
@reporter.step_deco("Generate Session Token For Object") @reporter.step("Generate Session Token For Object")
def generate_object_session_token( def generate_object_session_token(
owner_wallet: WalletInfo, owner_wallet: WalletInfo,
session_wallet: WalletInfo, session_wallet: WalletInfo,
@ -185,7 +178,7 @@ def generate_object_session_token(
) )
@reporter.step_deco("Get signed token for container session") @reporter.step("Get signed token for container session")
def get_container_signed_token( def get_container_signed_token(
owner_wallet: WalletInfo, owner_wallet: WalletInfo,
user_wallet: WalletInfo, user_wallet: WalletInfo,
@ -207,7 +200,7 @@ def get_container_signed_token(
return sign_session_token(shell, session_token_file, owner_wallet) return sign_session_token(shell, session_token_file, owner_wallet)
@reporter.step_deco("Get signed token for object session") @reporter.step("Get signed token for object session")
def get_object_signed_token( def get_object_signed_token(
owner_wallet: WalletInfo, owner_wallet: WalletInfo,
user_wallet: WalletInfo, user_wallet: WalletInfo,
@ -234,7 +227,7 @@ def get_object_signed_token(
return sign_session_token(shell, session_token_file, owner_wallet) return sign_session_token(shell, session_token_file, owner_wallet)
@reporter.step_deco("Create Session Token") @reporter.step("Create Session Token")
def create_session_token( def create_session_token(
shell: Shell, shell: Shell,
owner: str, owner: str,
@ -265,7 +258,7 @@ def create_session_token(
return session_token return session_token
@reporter.step_deco("Sign Session Token") @reporter.step("Sign Session Token")
def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) -> str: def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) -> str:
""" """
This function signs the session token with the given wallet. This function signs the session token with the given wallet.
@ -279,10 +272,6 @@ def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) -
The path to the signed token. The path to the signed token.
""" """
signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
frostfscli = FrostfsCli( frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG)
shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG frostfscli.util.sign_session_token(wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file)
)
frostfscli.util.sign_session_token(
wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file
)
return signed_token_file return signed_token_file
View file
@ -3,7 +3,7 @@ from time import sleep
import pytest import pytest
from frostfs_testlib.reporter import get_reporter from frostfs_testlib import reporter
from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import delete_object, get_object from frostfs_testlib.steps.cli.object import delete_object, get_object
@ -12,16 +12,13 @@ from frostfs_testlib.steps.tombstone import verify_head_tombstone
from frostfs_testlib.storage.cluster import Cluster from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
CLEANUP_TIMEOUT = 10 CLEANUP_TIMEOUT = 10
@reporter.step_deco("Delete Objects") @reporter.step("Delete Objects")
def delete_objects( def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster) -> None:
storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster
) -> None:
""" """
Deletes given storage objects. Deletes given storage objects.
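A common follow-up to delete_objects is asserting that a re-read fails with the imported OBJECT_ALREADY_REMOVED pattern. A hedged sketch (get_object's exact signature is assumed from its import here):

import pytest

from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED
from frostfs_testlib.steps.cli.object import get_object


def assert_removed(wallet, cid, oid, shell, endpoint):
    # A second GET for a deleted object is expected to match the error pattern.
    with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED):
        get_object(wallet, cid, oid, shell=shell, endpoint=endpoint)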
View file
@ -6,7 +6,7 @@
""" """
import logging import logging
from frostfs_testlib.reporter import get_reporter from frostfs_testlib import reporter
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import head_object from frostfs_testlib.steps.cli.object import head_object
@ -14,14 +14,11 @@ from frostfs_testlib.steps.complex_object_actions import get_last_object
from frostfs_testlib.storage.cluster import StorageNode from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.utils import string_utils from frostfs_testlib.utils import string_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@reporter.step_deco("Get Object Copies") @reporter.step("Get Object Copies")
def get_object_copies( def get_object_copies(complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> int:
""" """
The function performs requests to all nodes of the container and The function performs requests to all nodes of the container and
finds out if they store a copy of the object. The procedure is finds out if they store a copy of the object. The procedure is
@ -45,10 +42,8 @@ def get_object_copies(
) )
@reporter.step_deco("Get Simple Object Copies") @reporter.step("Get Simple Object Copies")
def get_simple_object_copies( def get_simple_object_copies(wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> int:
""" """
To figure out the number of copies of a simple object, only direct To figure out the number of copies of a simple object, only direct
HEAD requests should be made to every node of the container. HEAD requests should be made to every node of the container.
@ -66,9 +61,7 @@ def get_simple_object_copies(
copies = 0 copies = 0
for node in nodes: for node in nodes:
try: try:
response = head_object( response = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True)
wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True
)
if response: if response:
logger.info(f"Found object {oid} on node {node}") logger.info(f"Found object {oid} on node {node}")
copies += 1 copies += 1
@ -78,10 +71,8 @@ def get_simple_object_copies(
return copies return copies
@reporter.step_deco("Get Complex Object Copies") @reporter.step("Get Complex Object Copies")
def get_complex_object_copies( def get_complex_object_copies(wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> int:
""" """
To figure out the number of copies of a complex object, we first To figure out the number of copies of a complex object, we first
need to retrieve its Last object. We consider that the number of need to retrieve its Last object. We consider that the number of
@ -102,10 +93,8 @@ def get_complex_object_copies(
return get_simple_object_copies(wallet, cid, last_oid, shell, nodes) return get_simple_object_copies(wallet, cid, last_oid, shell, nodes)
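A typical caller compares the counted copies against the REP factor of the container's placement policy. A minimal sketch (the module path in the import is an assumption):

from frostfs_testlib.steps.storage_policy import get_simple_object_copies


def check_replication(wallet, cid, oid, shell, nodes, expected_copies: int):
    # expected_copies would normally be derived from the container's REP policy.
    copies = get_simple_object_copies(wallet, cid, oid, shell, nodes)
    assert copies == expected_copies, f"Expected {expected_copies} copies, got {copies}"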
@reporter.step_deco("Get Nodes With Object") @reporter.step("Get Nodes With Object")
def get_nodes_with_object( def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]:
cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> list[StorageNode]:
""" """
The function returns the list of nodes which store The function returns the list of nodes which store
the given object. the given object.
@ -141,7 +130,7 @@ def get_nodes_with_object(
return nodes_list return nodes_list
@reporter.step_deco("Get Nodes Without Object") @reporter.step("Get Nodes Without Object")
def get_nodes_without_object( def get_nodes_without_object(
wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> list[StorageNode]: ) -> list[StorageNode]:
@ -160,9 +149,7 @@ def get_nodes_without_object(
nodes_list = [] nodes_list = []
for node in nodes: for node in nodes:
try: try:
res = head_object( res = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True)
wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True
)
if res is None: if res is None:
nodes_list.append(node) nodes_list.append(node)
except Exception as err: except Exception as err:
View file
@ -3,18 +3,15 @@ import logging
from neo3.wallet import wallet from neo3.wallet import wallet
from frostfs_testlib.reporter import get_reporter from frostfs_testlib import reporter
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import head_object from frostfs_testlib.steps.cli.object import head_object
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@reporter.step_deco("Verify Head Tombstone") @reporter.step("Verify Head Tombstone")
def verify_head_tombstone( def verify_head_tombstone(wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str):
wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str
):
header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"] header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"]
s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"] s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"]
@ -30,12 +27,6 @@ def verify_head_tombstone(
assert header["ownerID"] == addr, "Tombstone Owner ID is wrong" assert header["ownerID"] == addr, "Tombstone Owner ID is wrong"
assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone" assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone"
assert ( assert header["sessionToken"]["body"]["object"]["verb"] == "DELETE", "Header Session Type isn't DELETE"
header["sessionToken"]["body"]["object"]["verb"] == "DELETE" assert header["sessionToken"]["body"]["object"]["target"]["container"] == cid, "Header Session ID is wrong"
), "Header Session Type isn't DELETE" assert oid in header["sessionToken"]["body"]["object"]["target"]["objects"], "Header Session OID is wrong"
assert (
header["sessionToken"]["body"]["object"]["target"]["container"] == cid
), "Header Session ID is wrong"
assert (
oid in header["sessionToken"]["body"]["object"]["target"]["objects"]
), "Header Session OID is wrong"
View file
@ -4,9 +4,9 @@ import re
import yaml import yaml
from yarl import URL from yarl import URL
from frostfs_testlib import reporter
from frostfs_testlib.hosting import Host, Hosting from frostfs_testlib.hosting import Host, Hosting
from frostfs_testlib.hosting.config import ServiceConfig from frostfs_testlib.hosting.config import ServiceConfig
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.storage import get_service_registry from frostfs_testlib.storage import get_service_registry
from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration
@ -16,8 +16,6 @@ from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
from frostfs_testlib.storage.service_registry import ServiceRegistry from frostfs_testlib.storage.service_registry import ServiceRegistry
reporter = get_reporter()
class ClusterNode: class ClusterNode:
""" """
View file
@ -4,13 +4,11 @@ from typing import Any
import yaml import yaml
from frostfs_testlib.reporter import get_reporter from frostfs_testlib import reporter
from frostfs_testlib.shell.interfaces import CommandOptions from frostfs_testlib.shell.interfaces import CommandOptions
from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
from frostfs_testlib.storage.dataclasses.node_base import ServiceClass from frostfs_testlib.storage.dataclasses.node_base import ServiceClass
reporter = get_reporter()
class ServiceConfiguration(ServiceConfigurationYml): class ServiceConfiguration(ServiceConfigurationYml):
def __init__(self, service: "ServiceClass") -> None: def __init__(self, service: "ServiceClass") -> None:
View file
@ -2,18 +2,16 @@ import copy
from typing import Optional from typing import Optional
import frostfs_testlib.resources.optionals as optionals import frostfs_testlib.resources.optionals as optionals
from frostfs_testlib import reporter
from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType
from frostfs_testlib.load.load_report import LoadReport from frostfs_testlib.load.load_report import LoadReport
from frostfs_testlib.load.load_verifiers import LoadVerifier from frostfs_testlib.load.load_verifiers import LoadVerifier
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.test_control import run_optionally from frostfs_testlib.testing.test_control import run_optionally
reporter = get_reporter()
class BackgroundLoadController: class BackgroundLoadController:
k6_dir: str k6_dir: str
@ -86,7 +84,7 @@ class BackgroundLoadController:
return all_endpoints[load_type][endpoint_selection_strategy] return all_endpoints[load_type][endpoint_selection_strategy]
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("Prepare load instances") @reporter.step("Prepare load instances")
def prepare(self): def prepare(self):
self.endpoints = self._get_endpoints(self.load_params.load_type, self.load_params.endpoint_selection_strategy) self.endpoints = self._get_endpoints(self.load_params.load_type, self.load_params.endpoint_selection_strategy)
self.runner.prepare(self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir) self.runner.prepare(self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir)
@ -99,7 +97,7 @@ class BackgroundLoadController:
self.started = True self.started = True
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("Stop load") @reporter.step("Stop load")
def stop(self): def stop(self):
self.runner.stop() self.runner.stop()
@ -108,7 +106,7 @@ class BackgroundLoadController:
return self.runner.is_running return self.runner.is_running
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("Reset load") @reporter.step("Reset load")
def _reset_for_consequent_load(self): def _reset_for_consequent_load(self):
"""This method is required if we want to run multiple loads during test run. """This method is required if we want to run multiple loads during test run.
Raise load counter by 1 and append it to load_id Raise load counter by 1 and append it to load_id
@ -118,7 +116,7 @@ class BackgroundLoadController:
self.load_params.set_id(f"{self.load_params.load_id}_{self.load_counter}") self.load_params.set_id(f"{self.load_params.load_id}_{self.load_counter}")
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("Startup load") @reporter.step("Startup load")
def startup(self): def startup(self):
self.prepare() self.prepare()
self.preset() self.preset()
@ -129,7 +127,7 @@ class BackgroundLoadController:
self.runner.preset() self.runner.preset()
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("Stop and get results of load") @reporter.step("Stop and get results of load")
def teardown(self, load_report: Optional[LoadReport] = None): def teardown(self, load_report: Optional[LoadReport] = None):
if not self.started: if not self.started:
return return
@ -141,7 +139,7 @@ class BackgroundLoadController:
load_report.add_summaries(self.load_summaries) load_report.add_summaries(self.load_summaries)
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("Run post-load verification") @reporter.step("Run post-load verification")
def verify(self): def verify(self):
try: try:
load_issues = self._collect_load_issues() load_issues = self._collect_load_issues()
@ -153,7 +151,7 @@ class BackgroundLoadController:
self._reset_for_consequent_load() self._reset_for_consequent_load()
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("Collect load issues") @reporter.step("Collect load issues")
def _collect_load_issues(self): def _collect_load_issues(self):
verifier = LoadVerifier(self.load_params) verifier = LoadVerifier(self.load_params)
return verifier.collect_load_issues(self.load_summaries) return verifier.collect_load_issues(self.load_summaries)
@ -163,7 +161,7 @@ class BackgroundLoadController:
self.runner.wait_until_finish(soft_timeout) self.runner.wait_until_finish(soft_timeout)
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("Verify loaded objects") @reporter.step("Verify loaded objects")
def _run_verify_scenario(self) -> list[str]: def _run_verify_scenario(self) -> list[str]:
self.verification_params = LoadParams( self.verification_params = LoadParams(
verify_clients=self.load_params.verify_clients, verify_clients=self.load_params.verify_clients,

View file

@ -4,12 +4,12 @@ import time
from typing import TypeVar from typing import TypeVar
import frostfs_testlib.resources.optionals as optionals import frostfs_testlib.resources.optionals as optionals
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
from frostfs_testlib.cli.netmap_parser import NetmapParser from frostfs_testlib.cli.netmap_parser import NetmapParser
from frostfs_testlib.healthcheck.interfaces import Healthcheck from frostfs_testlib.healthcheck.interfaces import Healthcheck
from frostfs_testlib.hosting.interfaces import HostStatus from frostfs_testlib.hosting.interfaces import HostStatus
from frostfs_testlib.plugins import load_all from frostfs_testlib.plugins import load_all
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, MORPH_BLOCK_TIME from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, MORPH_BLOCK_TIME
from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider
@ -21,7 +21,6 @@ from frostfs_testlib.testing import parallel
from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success
from frostfs_testlib.utils.datetime_utils import parse_time from frostfs_testlib.utils.datetime_utils import parse_time
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
if_up_down_helper = IfUpDownHelper() if_up_down_helper = IfUpDownHelper()
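For reference, a condensed before/after sketch of the reporter migration this commit applies to every module (function body elided; names taken from the diff itself):

    # Before: each module fetched its own reporter instance.
    from frostfs_testlib.reporter import get_reporter

    reporter = get_reporter()

    @reporter.step_deco("Stop host of node {node}")
    def stop_node_host(node, mode): ...

    # After: the package exposes a module-level reporter, and step()
    # is used both as a decorator and as a context manager.
    from frostfs_testlib import reporter

    @reporter.step("Stop host of node {node}")
    def stop_node_host(node, mode): ...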
@ -76,7 +75,7 @@ class ClusterStateController:
return online_svc return online_svc
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Stop host of node {node}") @reporter.step("Stop host of node {node}")
def stop_node_host(self, node: ClusterNode, mode: str): def stop_node_host(self, node: ClusterNode, mode: str):
# Drop ssh connection for this node before shutdown # Drop ssh connection for this node before shutdown
provider = SshConnectionProvider() provider = SshConnectionProvider()
@ -88,7 +87,7 @@ class ClusterStateController:
self._wait_for_host_offline(node) self._wait_for_host_offline(node)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Shutdown whole cluster") @reporter.step("Shutdown whole cluster")
def shutdown_cluster(self, mode: str, reversed_order: bool = False): def shutdown_cluster(self, mode: str, reversed_order: bool = False):
nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes
@ -105,7 +104,7 @@ class ClusterStateController:
self._wait_for_host_offline(node) self._wait_for_host_offline(node)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start host of node {node}") @reporter.step("Start host of node {node}")
def start_node_host(self, node: ClusterNode, startup_healthcheck: bool = True): def start_node_host(self, node: ClusterNode, startup_healthcheck: bool = True):
with reporter.step(f"Start host {node.host.config.address}"): with reporter.step(f"Start host {node.host.config.address}"):
node.host.start_host() node.host.start_host()
@ -115,7 +114,7 @@ class ClusterStateController:
self.wait_startup_healthcheck() self.wait_startup_healthcheck()
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start stopped hosts") @reporter.step("Start stopped hosts")
def start_stopped_hosts(self, reversed_order: bool = False): def start_stopped_hosts(self, reversed_order: bool = False):
if not self.stopped_nodes: if not self.stopped_nodes:
return return
@ -133,35 +132,35 @@ class ClusterStateController:
self.wait_after_storage_startup() self.wait_after_storage_startup()
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Detach disk {device} at {mountpoint} on node {node}") @reporter.step("Detach disk {device} at {mountpoint} on node {node}")
def detach_disk(self, node: StorageNode, device: str, mountpoint: str): def detach_disk(self, node: StorageNode, device: str, mountpoint: str):
disk_controller = self._get_disk_controller(node, device, mountpoint) disk_controller = self._get_disk_controller(node, device, mountpoint)
self.detached_disks[disk_controller.id] = disk_controller self.detached_disks[disk_controller.id] = disk_controller
disk_controller.detach() disk_controller.detach()
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Attach disk {device} at {mountpoint} on node {node}") @reporter.step("Attach disk {device} at {mountpoint} on node {node}")
def attach_disk(self, node: StorageNode, device: str, mountpoint: str): def attach_disk(self, node: StorageNode, device: str, mountpoint: str):
disk_controller = self._get_disk_controller(node, device, mountpoint) disk_controller = self._get_disk_controller(node, device, mountpoint)
disk_controller.attach() disk_controller.attach()
self.detached_disks.pop(disk_controller.id, None) self.detached_disks.pop(disk_controller.id, None)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Restore detached disks") @reporter.step("Restore detached disks")
def restore_disks(self): def restore_disks(self):
for disk_controller in self.detached_disks.values(): for disk_controller in self.detached_disks.values():
disk_controller.attach() disk_controller.attach()
self.detached_disks = {} self.detached_disks = {}
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Stop all {service_type} services") @reporter.step("Stop all {service_type} services")
def stop_services_of_type(self, service_type: type[ServiceClass], mask: bool = True): def stop_services_of_type(self, service_type: type[ServiceClass], mask: bool = True):
services = self.cluster.services(service_type) services = self.cluster.services(service_type)
self.stopped_services.update(services) self.stopped_services.update(services)
parallel([service.stop_service for service in services], mask=mask) parallel([service.stop_service for service in services], mask=mask)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start all {service_type} services") @reporter.step("Start all {service_type} services")
def start_services_of_type(self, service_type: type[ServiceClass]): def start_services_of_type(self, service_type: type[ServiceClass]):
services = self.cluster.services(service_type) services = self.cluster.services(service_type)
parallel([service.start_service for service in services]) parallel([service.start_service for service in services])
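parallel from frostfs_testlib.testing is used here in two shapes: a callable plus an iterable of arguments, and a list of already-bound zero-argument callables. A minimal ThreadPoolExecutor stand-in, shown only to illustrate the semantics this code assumes:

    from concurrent.futures import ThreadPoolExecutor

    def parallel_sketch(callables):
        # Toy equivalent of the list-of-callables form: run each zero-argument
        # callable on its own worker and propagate results (and exceptions).
        with ThreadPoolExecutor() as executor:
            futures = [executor.submit(fn) for fn in callables]
            return [future.result() for future in futures]

    statuses = parallel_sketch([lambda: "svc-a started", lambda: "svc-b started"])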
@ -176,24 +175,24 @@ class ClusterStateController:
result = s3gate.get_metric("frostfs_s3_gw_pool_current_nodes") result = s3gate.get_metric("frostfs_s3_gw_pool_current_nodes")
assert 'address="127.0.0.1' in result.stdout, "S3Gate should connect to local storage node" assert 'address="127.0.0.1' in result.stdout, "S3Gate should connect to local storage node"
@reporter.step_deco("Wait for S3Gates reconnection to local storage") @reporter.step("Wait for S3Gates reconnection to local storage")
def wait_s3gates(self): def wait_s3gates(self):
online_s3gates = self._get_online(S3Gate) online_s3gates = self._get_online(S3Gate)
if online_s3gates: if online_s3gates:
parallel(self.wait_s3gate, online_s3gates) parallel(self.wait_s3gate, online_s3gates)
@reporter.step_deco("Wait for cluster startup healtcheck") @reporter.step("Wait for cluster startup healtcheck")
def wait_startup_healthcheck(self): def wait_startup_healthcheck(self):
nodes = self.cluster.nodes(self._get_online(StorageNode)) nodes = self.cluster.nodes(self._get_online(StorageNode))
parallel(self.healthcheck.startup_healthcheck, nodes) parallel(self.healthcheck.startup_healthcheck, nodes)
@reporter.step_deco("Wait for storage reconnection to the system") @reporter.step("Wait for storage reconnection to the system")
def wait_after_storage_startup(self): def wait_after_storage_startup(self):
self.wait_startup_healthcheck() self.wait_startup_healthcheck()
self.wait_s3gates() self.wait_s3gates()
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start all stopped services") @reporter.step("Start all stopped services")
def start_all_stopped_services(self): def start_all_stopped_services(self):
stopped_storages = self._get_stopped_by_type(StorageNode) stopped_storages = self._get_stopped_by_type(StorageNode)
parallel([service.start_service for service in self.stopped_services]) parallel([service.start_service for service in self.stopped_services])
@ -203,21 +202,21 @@ class ClusterStateController:
self.wait_after_storage_startup() self.wait_after_storage_startup()
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Stop {service_type} service on {node}") @reporter.step("Stop {service_type} service on {node}")
def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True): def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True):
service = node.service(service_type) service = node.service(service_type)
service.stop_service(mask) service.stop_service(mask)
self.stopped_services.add(service) self.stopped_services.add(service)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start {service_type} service on {node}") @reporter.step("Start {service_type} service on {node}")
def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]): def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]):
service = node.service(service_type) service = node.service(service_type)
service.start_service() service.start_service()
self.stopped_services.discard(service) self.stopped_services.discard(service)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start all stopped {service_type} services") @reporter.step("Start all stopped {service_type} services")
def start_stopped_services_of_type(self, service_type: type[ServiceClass]): def start_stopped_services_of_type(self, service_type: type[ServiceClass]):
stopped_svc = self._get_stopped_by_type(service_type) stopped_svc = self._get_stopped_by_type(service_type)
if not stopped_svc: if not stopped_svc:
@ -231,7 +230,7 @@ class ClusterStateController:
# TODO: Deprecated # TODO: Deprecated
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Stop all storage services on cluster") @reporter.step("Stop all storage services on cluster")
def stop_all_storage_services(self, reversed_order: bool = False): def stop_all_storage_services(self, reversed_order: bool = False):
nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes
@ -240,7 +239,7 @@ class ClusterStateController:
# TODO: Deprecated # TODO: Deprecated
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Stop all S3 gates on cluster") @reporter.step("Stop all S3 gates on cluster")
def stop_all_s3_gates(self, reversed_order: bool = False): def stop_all_s3_gates(self, reversed_order: bool = False):
nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes
@ -249,42 +248,42 @@ class ClusterStateController:
# TODO: Deprecated # TODO: Deprecated
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Stop storage service on {node}") @reporter.step("Stop storage service on {node}")
def stop_storage_service(self, node: ClusterNode, mask: bool = True): def stop_storage_service(self, node: ClusterNode, mask: bool = True):
self.stop_service_of_type(node, StorageNode, mask) self.stop_service_of_type(node, StorageNode, mask)
# TODO: Deprecated # TODO: Deprecated
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start storage service on {node}") @reporter.step("Start storage service on {node}")
def start_storage_service(self, node: ClusterNode): def start_storage_service(self, node: ClusterNode):
self.start_service_of_type(node, StorageNode) self.start_service_of_type(node, StorageNode)
# TODO: Deprecated # TODO: Deprecated
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start stopped storage services") @reporter.step("Start stopped storage services")
def start_stopped_storage_services(self): def start_stopped_storage_services(self):
self.start_stopped_services_of_type(StorageNode) self.start_stopped_services_of_type(StorageNode)
# TODO: Deprecated # TODO: Deprecated
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Stop s3 gate on {node}") @reporter.step("Stop s3 gate on {node}")
def stop_s3_gate(self, node: ClusterNode, mask: bool = True): def stop_s3_gate(self, node: ClusterNode, mask: bool = True):
self.stop_service_of_type(node, S3Gate, mask) self.stop_service_of_type(node, S3Gate, mask)
# TODO: Deprecated # TODO: Deprecated
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start s3 gate on {node}") @reporter.step("Start s3 gate on {node}")
def start_s3_gate(self, node: ClusterNode): def start_s3_gate(self, node: ClusterNode):
self.start_service_of_type(node, S3Gate) self.start_service_of_type(node, S3Gate)
# TODO: Deprecated # TODO: Deprecated
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start stopped S3 gates") @reporter.step("Start stopped S3 gates")
def start_stopped_s3_gates(self): def start_stopped_s3_gates(self):
self.start_stopped_services_of_type(S3Gate) self.start_stopped_services_of_type(S3Gate)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Suspend {process_name} service in {node}") @reporter.step("Suspend {process_name} service in {node}")
def suspend_service(self, process_name: str, node: ClusterNode): def suspend_service(self, process_name: str, node: ClusterNode):
node.host.wait_success_suspend_process(process_name) node.host.wait_success_suspend_process(process_name)
if self.suspended_services.get(process_name): if self.suspended_services.get(process_name):
@ -293,20 +292,20 @@ class ClusterStateController:
self.suspended_services[process_name] = [node] self.suspended_services[process_name] = [node]
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Resume {process_name} service in {node}") @reporter.step("Resume {process_name} service in {node}")
def resume_service(self, process_name: str, node: ClusterNode): def resume_service(self, process_name: str, node: ClusterNode):
node.host.wait_success_resume_process(process_name) node.host.wait_success_resume_process(process_name)
if self.suspended_services.get(process_name) and node in self.suspended_services[process_name]: if self.suspended_services.get(process_name) and node in self.suspended_services[process_name]:
self.suspended_services[process_name].remove(node) self.suspended_services[process_name].remove(node)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start suspend processes services") @reporter.step("Start suspend processes services")
def resume_suspended_services(self): def resume_suspended_services(self):
for process_name, list_nodes in self.suspended_services.items(): for process_name, list_nodes in self.suspended_services.items():
[node.host.wait_success_resume_process(process_name) for node in list_nodes] [node.host.wait_success_resume_process(process_name) for node in list_nodes]
self.suspended_services = {} self.suspended_services = {}
@reporter.step_deco("Drop traffic to {node}, with ports - {ports}, nodes - {block_nodes}") @reporter.step("Drop traffic to {node}, with ports - {ports}, nodes - {block_nodes}")
def drop_traffic( def drop_traffic(
self, self,
mode: str, mode: str,
@ -327,7 +326,7 @@ class ClusterStateController:
time.sleep(wakeup_timeout) time.sleep(wakeup_timeout)
self.dropped_traffic.append(node) self.dropped_traffic.append(node)
@reporter.step_deco("Ping traffic") @reporter.step("Ping traffic")
def ping_traffic( def ping_traffic(
self, self,
node: ClusterNode, node: ClusterNode,
@ -343,7 +342,7 @@ class ClusterStateController:
return False return False
return True return True
@reporter.step_deco("Start traffic to {node}") @reporter.step("Start traffic to {node}")
def restore_traffic( def restore_traffic(
self, self,
mode: str, mode: str,
@ -358,12 +357,12 @@ class ClusterStateController:
case "nodes": case "nodes":
IpTablesHelper.restore_input_traffic_to_node(node=node) IpTablesHelper.restore_input_traffic_to_node(node=node)
@reporter.step_deco("Restore blocked nodes") @reporter.step("Restore blocked nodes")
def restore_all_traffic(self): def restore_all_traffic(self):
parallel(self._restore_traffic_to_node, self.dropped_traffic) parallel(self._restore_traffic_to_node, self.dropped_traffic)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Hard reboot host {node} via magic SysRq option") @reporter.step("Hard reboot host {node} via magic SysRq option")
def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True, startup_healthcheck: bool = True): def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True, startup_healthcheck: bool = True):
shell = node.host.get_shell() shell = node.host.get_shell()
shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"') shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"')
@ -383,14 +382,14 @@ class ClusterStateController:
if startup_healthcheck: if startup_healthcheck:
self.wait_startup_healthcheck() self.wait_startup_healthcheck()
@reporter.step_deco("Down {interface} to {nodes}") @reporter.step("Down {interface} to {nodes}")
def down_interface(self, nodes: list[ClusterNode], interface: str): def down_interface(self, nodes: list[ClusterNode], interface: str):
for node in nodes: for node in nodes:
if_up_down_helper.down_interface(node=node, interface=interface) if_up_down_helper.down_interface(node=node, interface=interface)
assert if_up_down_helper.check_state(node=node, interface=interface) == "DOWN" assert if_up_down_helper.check_state(node=node, interface=interface) == "DOWN"
self.nodes_with_modified_interface.append(node) self.nodes_with_modified_interface.append(node)
@reporter.step_deco("Up {interface} to {nodes}") @reporter.step("Up {interface} to {nodes}")
def up_interface(self, nodes: list[ClusterNode], interface: str): def up_interface(self, nodes: list[ClusterNode], interface: str):
for node in nodes: for node in nodes:
if_up_down_helper.up_interface(node=node, interface=interface) if_up_down_helper.up_interface(node=node, interface=interface)
@ -398,17 +397,17 @@ class ClusterStateController:
if node in self.nodes_with_modified_interface: if node in self.nodes_with_modified_interface:
self.nodes_with_modified_interface.remove(node) self.nodes_with_modified_interface.remove(node)
@reporter.step_deco("Restore interface") @reporter.step("Restore interface")
def restore_interfaces(self): def restore_interfaces(self):
for node in self.nodes_with_modified_interface: for node in self.nodes_with_modified_interface:
if_up_down_helper.up_all_interface(node) if_up_down_helper.up_all_interface(node)
@reporter.step_deco("Get node time") @reporter.step("Get node time")
def get_node_date(self, node: ClusterNode) -> datetime: def get_node_date(self, node: ClusterNode) -> datetime:
shell = node.host.get_shell() shell = node.host.get_shell()
return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z") return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z")
@reporter.step_deco("Set node time to {in_date}") @reporter.step("Set node time to {in_date}")
def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: def change_node_date(self, node: ClusterNode, in_date: datetime) -> None:
shell = node.host.get_shell() shell = node.host.get_shell()
shell.exec(f"date -s @{time.mktime(in_date.timetuple())}") shell.exec(f"date -s @{time.mktime(in_date.timetuple())}")
@ -417,7 +416,7 @@ class ClusterStateController:
with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"):
assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1) assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1)
@reporter.step_deco(f"Restore time") @reporter.step(f"Restore time")
def restore_node_date(self, node: ClusterNode) -> None: def restore_node_date(self, node: ClusterNode) -> None:
shell = node.host.get_shell() shell = node.host.get_shell()
now_time = datetime.datetime.now(datetime.timezone.utc) now_time = datetime.datetime.now(datetime.timezone.utc)
@ -425,14 +424,14 @@ class ClusterStateController:
shell.exec(f"date -s @{time.mktime(now_time.timetuple())}") shell.exec(f"date -s @{time.mktime(now_time.timetuple())}")
shell.exec("hwclock --systohc") shell.exec("hwclock --systohc")
@reporter.step_deco("Change the synchronizer status to {status}") @reporter.step("Change the synchronizer status to {status}")
def set_sync_date_all_nodes(self, status: str): def set_sync_date_all_nodes(self, status: str):
if status == "active": if status == "active":
parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes) parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes)
return return
parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes) parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes)
@reporter.step_deco("Set MaintenanceModeAllowed - {status}") @reporter.step("Set MaintenanceModeAllowed - {status}")
def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None:
frostfs_adm = FrostfsAdm( frostfs_adm = FrostfsAdm(
shell=cluster_node.host.get_shell(), shell=cluster_node.host.get_shell(),
@ -441,7 +440,7 @@ class ClusterStateController:
) )
frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}") frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}")
@reporter.step_deco("Set mode node to {status}") @reporter.step("Set mode node to {status}")
def set_mode_node(self, cluster_node: ClusterNode, wallet: str, status: str, await_tick: bool = True) -> None: def set_mode_node(self, cluster_node: ClusterNode, wallet: str, status: str, await_tick: bool = True) -> None:
rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint() rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint()
control_endpoint = cluster_node.service(StorageNode).get_control_endpoint() control_endpoint = cluster_node.service(StorageNode).get_control_endpoint()
@ -465,8 +464,7 @@ class ClusterStateController:
self.check_node_status(status=status, wallet=wallet, cluster_node=cluster_node) self.check_node_status(status=status, wallet=wallet, cluster_node=cluster_node)
@wait_for_success(80, 8) @wait_for_success(80, 8, title="Wait for storage status to become {status}")
@reporter.step_deco("Check status node, status - {status}")
def check_node_status(self, status: str, wallet: str, cluster_node: ClusterNode): def check_node_status(self, status: str, wallet: str, cluster_node: ClusterNode):
frostfs_cli = FrostfsCli( frostfs_cli = FrostfsCli(
shell=self.shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG shell=self.shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
@ -537,13 +535,13 @@ class ClusterStateController:
interfaces.append(ip) interfaces.append(ip)
return interfaces return interfaces
@reporter.step_deco("Ping node") @reporter.step("Ping node")
def _ping_host(self, node: ClusterNode): def _ping_host(self, node: ClusterNode):
options = CommandOptions(check=False) options = CommandOptions(check=False)
return self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code return self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code
@retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.ONLINE) @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.ONLINE)
@reporter.step_deco("Waiting for {node} to go online") @reporter.step("Waiting for {node} to go online")
def _wait_for_host_online(self, node: ClusterNode): def _wait_for_host_online(self, node: ClusterNode):
try: try:
ping_result = self._ping_host(node) ping_result = self._ping_host(node)
@ -555,7 +553,7 @@ class ClusterStateController:
return HostStatus.OFFLINE return HostStatus.OFFLINE
@retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.OFFLINE) @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.OFFLINE)
@reporter.step_deco("Waiting for {node} to go offline") @reporter.step("Waiting for {node} to go offline")
def _wait_for_host_offline(self, node: ClusterNode): def _wait_for_host_offline(self, node: ClusterNode):
try: try:
ping_result = self._ping_host(node) ping_result = self._ping_host(node)

View file

@ -1,13 +1,11 @@
from typing import Any from typing import Any
from frostfs_testlib.reporter import get_reporter from frostfs_testlib import reporter
from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController, StateManager from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController, StateManager
from frostfs_testlib.storage.dataclasses.node_base import ServiceClass from frostfs_testlib.storage.dataclasses.node_base import ServiceClass
from frostfs_testlib.testing import parallel from frostfs_testlib.testing import parallel
reporter = get_reporter()
class ConfigStateManager(StateManager): class ConfigStateManager(StateManager):
def __init__(self, cluster_state_controller: ClusterStateController) -> None: def __init__(self, cluster_state_controller: ClusterStateController) -> None:
@ -15,7 +13,7 @@ class ConfigStateManager(StateManager):
self.services_with_changed_config: set[tuple[ClusterNode, ServiceClass]] = set() self.services_with_changed_config: set[tuple[ClusterNode, ServiceClass]] = set()
self.cluster = self.csc.cluster self.cluster = self.csc.cluster
@reporter.step_deco("Change configuration for {service_type} on all nodes") @reporter.step("Change configuration for {service_type} on all nodes")
def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any]): def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any]):
services = self.cluster.services(service_type) services = self.cluster.services(service_type)
nodes = self.cluster.nodes(services) nodes = self.cluster.nodes(services)
@ -25,7 +23,7 @@ class ConfigStateManager(StateManager):
parallel([node.config(service_type).set for node in nodes], values=values) parallel([node.config(service_type).set for node in nodes], values=values)
self.csc.start_services_of_type(service_type) self.csc.start_services_of_type(service_type)
@reporter.step_deco("Change configuration for {service_type} on {node}") @reporter.step("Change configuration for {service_type} on {node}")
def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]): def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]):
self.services_with_changed_config.add((node, service_type)) self.services_with_changed_config.add((node, service_type))
@ -33,7 +31,7 @@ class ConfigStateManager(StateManager):
node.config(service_type).set(values) node.config(service_type).set(values)
self.csc.start_service_of_type(node, service_type) self.csc.start_service_of_type(node, service_type)
@reporter.step_deco("Revert all configuration changes") @reporter.step("Revert all configuration changes")
def revert_all(self): def revert_all(self):
if not self.services_with_changed_config: if not self.services_with_changed_config:
return return
@ -44,7 +42,7 @@ class ConfigStateManager(StateManager):
self.csc.start_all_stopped_services() self.csc.start_all_stopped_services()
# TODO: parallel can't have multiple parallel_items :( # TODO: parallel can't have multiple parallel_items :(
@reporter.step_deco("Revert all configuration {node_and_service}") @reporter.step("Revert all configuration {node_and_service}")
def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]): def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]):
node, service_type = node_and_service node, service_type = node_and_service
self.csc.stop_service_of_type(node, service_type) self.csc.stop_service_of_type(node, service_type)

View file

@ -4,16 +4,14 @@ from typing import Optional, TypedDict, TypeVar
import yaml import yaml
from frostfs_testlib import reporter
from frostfs_testlib.hosting.config import ServiceConfig from frostfs_testlib.hosting.config import ServiceConfig
from frostfs_testlib.hosting.interfaces import Host from frostfs_testlib.hosting.interfaces import Host
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.shell.interfaces import CommandResult from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.storage.constants import ConfigAttributes
from frostfs_testlib.testing.readable import HumanReadableABC from frostfs_testlib.testing.readable import HumanReadableABC
from frostfs_testlib.utils import wallet_utils from frostfs_testlib.utils import wallet_utils
reporter = get_reporter()
@dataclass @dataclass
class NodeBase(HumanReadableABC): class NodeBase(HumanReadableABC):

View file

@ -1,7 +1,7 @@
import time import time
from typing import Optional from typing import Optional
from frostfs_testlib.reporter import get_reporter from frostfs_testlib import reporter
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.steps import epoch from frostfs_testlib.steps import epoch
@ -9,15 +9,13 @@ from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.utils import datetime_utils from frostfs_testlib.utils import datetime_utils
reporter = get_reporter()
# To skip adding every mandatory singleton dependency to EACH test function # To skip adding every mandatory singleton dependency to EACH test function
class ClusterTestBase: class ClusterTestBase:
shell: Shell shell: Shell
cluster: Cluster cluster: Cluster
@reporter.step_deco("Tick {epochs_to_tick} epochs, wait {wait_block} block") @reporter.step("Tick {epochs_to_tick} epochs, wait {wait_block} block")
def tick_epochs( def tick_epochs(
self, self,
epochs_to_tick: int, epochs_to_tick: int,

View file

@ -19,10 +19,9 @@ from typing import Dict, List, TypedDict, Union
import pexpect import pexpect
from frostfs_testlib.reporter import get_reporter from frostfs_testlib import reporter
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetmapInfo from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetmapInfo
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
COLOR_GREEN = "\033[92m" COLOR_GREEN = "\033[92m"
COLOR_OFF = "\033[0m" COLOR_OFF = "\033[0m"
@ -65,9 +64,7 @@ def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str =
return cmd.decode() return cmd.decode()
def _attach_allure_log( def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime) -> None:
cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime
) -> None:
command_attachment = ( command_attachment = (
f"COMMAND: '{cmd}'\n" f"COMMAND: '{cmd}'\n"
f"OUTPUT:\n {output}\n" f"OUTPUT:\n {output}\n"

View file

@ -1,13 +1,12 @@
import logging import logging
import re import re
from frostfs_testlib.reporter import get_reporter from frostfs_testlib import reporter
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@reporter.step_deco("Read environment.properties") @reporter.step("Read environment.properties")
def read_env_properties(file_path: str) -> dict: def read_env_properties(file_path: str) -> dict:
with open(file_path, "r") as file: with open(file_path, "r") as file:
raw_content = file.read() raw_content = file.read()
@ -23,7 +22,7 @@ def read_env_properties(file_path: str) -> dict:
return env_properties return env_properties
@reporter.step_deco("Update data in environment.properties") @reporter.step("Update data in environment.properties")
def save_env_properties(file_path: str, env_data: dict) -> None: def save_env_properties(file_path: str, env_data: dict) -> None:
with open(file_path, "a+") as env_file: with open(file_path, "a+") as env_file:
for env, env_value in env_data.items(): for env, env_value in env_data.items():

View file

@ -3,7 +3,7 @@ from dataclasses import dataclass
from time import sleep from time import sleep
from typing import Optional from typing import Optional
from frostfs_testlib.reporter import get_reporter from frostfs_testlib import reporter
from frostfs_testlib.resources.common import SERVICE_MAX_STARTUP_TIME from frostfs_testlib.resources.common import SERVICE_MAX_STARTUP_TIME
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import neo_go_dump_keys from frostfs_testlib.steps.cli.object import neo_go_dump_keys
@ -15,12 +15,10 @@ from frostfs_testlib.storage.dataclasses.node_base import ServiceClass
from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.datetime_utils import parse_time from frostfs_testlib.utils.datetime_utils import parse_time
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@reporter.step_deco("Check and return status of given service") @reporter.step("Check and return status of given service")
def service_status(service: str, shell: Shell) -> str: def service_status(service: str, shell: Shell) -> str:
return shell.exec(f"sudo systemctl is-active {service}").stdout.rstrip() return shell.exec(f"sudo systemctl is-active {service}").stdout.rstrip()
@ -73,14 +71,14 @@ class TopCommand:
) )
@reporter.step_deco("Run `top` command with specified PID") @reporter.step("Run `top` command with specified PID")
def service_status_top(service: str, shell: Shell) -> TopCommand: def service_status_top(service: str, shell: Shell) -> TopCommand:
pid = service_pid(service, shell) pid = service_pid(service, shell)
output = shell.exec(f"sudo top -b -n 1 -p {pid}").stdout output = shell.exec(f"sudo top -b -n 1 -p {pid}").stdout
return TopCommand.from_stdout(output, pid) return TopCommand.from_stdout(output, pid)
@reporter.step_deco("Restart service n times with sleep") @reporter.step("Restart service n times with sleep")
def multiple_restart( def multiple_restart(
service_type: type[NodeBase], service_type: type[NodeBase],
node: ClusterNode, node: ClusterNode,
@ -95,8 +93,7 @@ def multiple_restart(
sleep(sleep_interval) sleep(sleep_interval)
@reporter.step_deco("Get status of list of services and check expected status") @wait_for_success(60, 5, title="Wait for services become {expected_status} on node {cluster_node}")
@wait_for_success(60, 5)
def check_services_status(cluster_node: ClusterNode, service_list: list[ServiceClass], expected_status: str): def check_services_status(cluster_node: ClusterNode, service_list: list[ServiceClass], expected_status: str):
cmd = "" cmd = ""
for service in service_list: for service in service_list:
@ -112,8 +109,7 @@ def check_services_status(cluster_node: ClusterNode, service_list: list[ServiceC
), f"Requested status={expected_status} not found in requested services={service_list}, list of statuses={result}" ), f"Requested status={expected_status} not found in requested services={service_list}, list of statuses={result}"
@reporter.step_deco("Wait for active status of passed service") @wait_for_success(60, 5, title="Wait for {service} become active")
@wait_for_success(60, 5)
def wait_service_in_desired_state(service: str, shell: Shell, expected_status: Optional[str] = "active"): def wait_service_in_desired_state(service: str, shell: Shell, expected_status: Optional[str] = "active"):
real_status = service_status(service=service, shell=shell) real_status = service_status(service=service, shell=shell)
assert ( assert (
@ -121,8 +117,7 @@ def wait_service_in_desired_state(service: str, shell: Shell, expected_status: O
), f"Service {service}: expected status= {expected_status}, real status {real_status}" ), f"Service {service}: expected status= {expected_status}, real status {real_status}"
@reporter.step_deco("Run healthcheck against passed service") @wait_for_success(parse_time(SERVICE_MAX_STARTUP_TIME), 1, title="Wait for {service_type} passes healtcheck on {node}")
@wait_for_success(parse_time(SERVICE_MAX_STARTUP_TIME), 1)
def service_type_healthcheck( def service_type_healthcheck(
service_type: type[NodeBase], service_type: type[NodeBase],
node: ClusterNode, node: ClusterNode,
@ -133,26 +128,25 @@ def service_type_healthcheck(
), f"Healthcheck failed for {service.get_service_systemctl_name()}, IP={node.host_ip}" ), f"Healthcheck failed for {service.get_service_systemctl_name()}, IP={node.host_ip}"
@reporter.step_deco("Kill by process name") @reporter.step("Kill by process name")
def kill_by_service_name(service_type: type[NodeBase], node: ClusterNode): def kill_by_service_name(service_type: type[NodeBase], node: ClusterNode):
service_systemctl_name = node.service(service_type).get_service_systemctl_name() service_systemctl_name = node.service(service_type).get_service_systemctl_name()
pid = service_pid(service_systemctl_name, node.host.get_shell()) pid = service_pid(service_systemctl_name, node.host.get_shell())
node.host.get_shell().exec(f"sudo kill -9 {pid}") node.host.get_shell().exec(f"sudo kill -9 {pid}")
@reporter.step_deco("Service {service} suspend") @reporter.step("Suspend {service}")
def suspend_service(shell: Shell, service: str): def suspend_service(shell: Shell, service: str):
shell.exec(f"sudo kill -STOP {service_pid(service, shell)}") shell.exec(f"sudo kill -STOP {service_pid(service, shell)}")
@reporter.step_deco("Service {service} resume") @reporter.step("Resume {service}")
def resume_service(shell: Shell, service: str): def resume_service(shell: Shell, service: str):
shell.exec(f"sudo kill -CONT {service_pid(service, shell)}") shell.exec(f"sudo kill -CONT {service_pid(service, shell)}")
@reporter.step_deco("Retrieve service's pid")
# retry mechanism: when the task has been started recently, a '0' PID may be returned # retry mechanism: when the task has been started recently, a '0' PID may be returned
@wait_for_success(10, 1) @wait_for_success(10, 1, title="Get {service} pid")
def service_pid(service: str, shell: Shell) -> int: def service_pid(service: str, shell: Shell) -> int:
output = shell.exec(f"systemctl show --property MainPID {service}").stdout.rstrip() output = shell.exec(f"systemctl show --property MainPID {service}").stdout.rstrip()
splitted = output.split("=") splitted = output.split("=")
@ -161,7 +155,7 @@ def service_pid(service: str, shell: Shell) -> int:
return PID return PID
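systemctl show --property MainPID prints a single MainPID=<pid> line, and a freshly started unit can briefly report 0, which is why the helper retries (see the comment above). A standalone sketch of the parse:

    import subprocess

    def main_pid(service: str) -> int:
        # Output looks like "MainPID=1234"; 0 means no main process yet.
        output = subprocess.run(
            ["systemctl", "show", "--property", "MainPID", service],
            capture_output=True,
            text=True,
        ).stdout.rstrip()
        return int(output.split("=")[1])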
@reporter.step_deco("Wrapper for neo-go dump keys command") @reporter.step("Wrapper for neo-go dump keys command")
def dump_keys(shell: Shell, node: ClusterNode) -> dict: def dump_keys(shell: Shell, node: ClusterNode) -> dict:
host = node.host host = node.host
service_config = host.get_service_config(node.service(MorphChain).name) service_config = host.get_service_config(node.service(MorphChain).name)
@ -169,7 +163,7 @@ def dump_keys(shell: Shell, node: ClusterNode) -> dict:
return neo_go_dump_keys(shell=shell, wallet=wallet) return neo_go_dump_keys(shell=shell, wallet=wallet)
@reporter.step_deco("Wait for object replication") @reporter.step("Wait for object replication")
def wait_object_replication( def wait_object_replication(
cid: str, cid: str,
oid: str, oid: str,

View file

@ -1,17 +1,15 @@
from concurrent.futures import ThreadPoolExecutor from concurrent.futures import ThreadPoolExecutor
from frostfs_testlib.reporter import get_reporter from frostfs_testlib import reporter
from frostfs_testlib.storage.dataclasses.node_base import NodeBase from frostfs_testlib.storage.dataclasses.node_base import NodeBase
reporter = get_reporter()
class FileKeeper: class FileKeeper:
"""This class is responsible to make backup copy of modified file and restore when required (mostly after the test)""" """This class is responsible to make backup copy of modified file and restore when required (mostly after the test)"""
files_to_restore: dict[NodeBase, list[str]] = {} files_to_restore: dict[NodeBase, list[str]] = {}
@reporter.step_deco("Adding {file_to_restore} from node {node} to restore list") @reporter.step("Adding {file_to_restore} from node {node} to restore list")
def add(self, node: NodeBase, file_to_restore: str): def add(self, node: NodeBase, file_to_restore: str):
if node in self.files_to_restore and file_to_restore in self.files_to_restore[node]: if node in self.files_to_restore and file_to_restore in self.files_to_restore[node]:
# Already added # Already added
@ -26,7 +24,7 @@ class FileKeeper:
shell = node.host.get_shell() shell = node.host.get_shell()
shell.exec(f"cp {file_to_restore} {file_to_restore}.bak") shell.exec(f"cp {file_to_restore} {file_to_restore}.bak")
@reporter.step_deco("Restore files") @reporter.step("Restore files")
def restore_files(self): def restore_files(self):
nodes = self.files_to_restore.keys() nodes = self.files_to_restore.keys()
if not nodes: if not nodes:
@ -41,7 +39,7 @@ class FileKeeper:
# Iterate through results for exception check if any # Iterate through results for exception check if any
pass pass
@reporter.step_deco("Restore files on node {node}") @reporter.step("Restore files on node {node}")
def _restore_files_on_node(self, node: NodeBase): def _restore_files_on_node(self, node: NodeBase):
shell = node.host.get_shell() shell = node.host.get_shell()
for file_to_restore in self.files_to_restore[node]: for file_to_restore in self.files_to_restore[node]:

View file

@ -4,10 +4,9 @@ import os
import uuid import uuid
from typing import Any, Optional from typing import Any, Optional
from frostfs_testlib.reporter import get_reporter from frostfs_testlib import reporter
from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.resources.common import ASSETS_DIR
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@ -61,7 +60,7 @@ def generate_file_with_content(
return file_path return file_path
@reporter.step_deco("Get File Hash") @reporter.step("Get File Hash")
def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str: def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str:
"""Generates hash for the specified file. """Generates hash for the specified file.
@ -88,7 +87,7 @@ def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[in
return file_hash.hexdigest() return file_hash.hexdigest()
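get_file_hash hashes either the whole file or an (offset, len) slice of it. A minimal standalone equivalent, with SHA-256 assumed since the digest algorithm is not visible in this hunk (the parameter names here are illustrative):

    import hashlib
    from typing import Optional

    def file_hash_sketch(file_path: str, length: Optional[int] = None, offset: Optional[int] = None) -> str:
        digest = hashlib.sha256()  # assumed digest; the helper's actual choice is not shown
        with open(file_path, "rb") as f:
            if offset:
                f.seek(offset)
            digest.update(f.read(length) if length else f.read())
        return digest.hexdigest()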
@reporter.step_deco("Concatenation set of files to one file") @reporter.step("Concatenation set of files to one file")
def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str: def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str:
"""Concatenates several files into a single file. """Concatenates several files into a single file.