From dc6b0e407fd8e65bfab85c9ce52770887de9cc20 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 29 Nov 2023 15:27:17 +0300 Subject: [PATCH] [#133] Change reporter usage Signed-off-by: Andrey Berezin --- .../healthcheck/basic_healthcheck.py | 30 ++-- src/frostfs_testlib/load/k6.py | 7 +- src/frostfs_testlib/load/load_verifiers.py | 23 +-- src/frostfs_testlib/load/runners.py | 32 ++-- .../processes/remote_process.py | 62 +++---- src/frostfs_testlib/reporter/__init__.py | 6 + .../reporter/allure_handler.py | 7 +- src/frostfs_testlib/s3/aws_cli_client.py | 151 ++++++++++-------- src/frostfs_testlib/s3/boto3_client.py | 145 ++++++++--------- src/frostfs_testlib/shell/local_shell.py | 3 +- src/frostfs_testlib/shell/ssh_shell.py | 3 +- src/frostfs_testlib/steps/acl.py | 19 +-- src/frostfs_testlib/steps/cli/container.py | 35 ++-- src/frostfs_testlib/steps/cli/object.py | 41 +++-- .../steps/complex_object_actions.py | 7 +- src/frostfs_testlib/steps/epoch.py | 17 +- src/frostfs_testlib/steps/http/http_gate.py | 58 +++---- src/frostfs_testlib/steps/network.py | 30 ++-- src/frostfs_testlib/steps/node_management.py | 61 +++---- src/frostfs_testlib/steps/payment_neogo.py | 24 ++- src/frostfs_testlib/steps/s3/s3_helper.py | 86 +++------- src/frostfs_testlib/steps/session_token.py | 35 ++-- src/frostfs_testlib/steps/storage_object.py | 9 +- src/frostfs_testlib/steps/storage_policy.py | 37 ++--- src/frostfs_testlib/steps/tombstone.py | 21 +-- src/frostfs_testlib/storage/cluster.py | 4 +- .../configuration/service_configuration.py | 4 +- .../controllers/background_load_controller.py | 20 ++- .../controllers/cluster_state_controller.py | 94 ++++++----- .../state_managers/config_state_manager.py | 12 +- .../storage/dataclasses/node_base.py | 4 +- .../testing/cluster_test_base.py | 6 +- src/frostfs_testlib/utils/cli_utils.py | 7 +- src/frostfs_testlib/utils/env_utils.py | 7 +- src/frostfs_testlib/utils/failover_utils.py | 32 ++-- src/frostfs_testlib/utils/file_keeper.py | 10 +- src/frostfs_testlib/utils/file_utils.py | 7 +- 37 files changed, 478 insertions(+), 678 deletions(-) diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py index 4cb3a48..0443e28 100644 --- a/src/frostfs_testlib/healthcheck/basic_healthcheck.py +++ b/src/frostfs_testlib/healthcheck/basic_healthcheck.py @@ -1,8 +1,8 @@ from typing import Callable +from frostfs_testlib import reporter from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.healthcheck.interfaces import Healthcheck -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC from frostfs_testlib.shell import CommandOptions from frostfs_testlib.steps.node_management import storage_node_healthcheck @@ -10,8 +10,6 @@ from frostfs_testlib.storage.cluster import ClusterNode, ServiceClass from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils.failover_utils import check_services_status -reporter = get_reporter() - class BasicHealthcheck(Healthcheck): def _perform(self, cluster_node: ClusterNode, checks: dict[Callable, dict]): @@ -23,36 +21,33 @@ class BasicHealthcheck(Healthcheck): assert not issues, "Issues found:\n" + "\n".join(issues) - @wait_for_success(900, 30) + @wait_for_success(900, 30, title="Wait for full healthcheck for {cluster_node}") def full_healthcheck(self, cluster_node: ClusterNode): checks = { self.storage_healthcheck: {}, self._tree_healthcheck: {}, } - with 
reporter.step(f"Perform full healthcheck for {cluster_node}"): - self._perform(cluster_node, checks) + self._perform(cluster_node, checks) - @wait_for_success(900, 30) + @wait_for_success(900, 30, title="Wait for startup healthcheck on {cluster_node}") def startup_healthcheck(self, cluster_node: ClusterNode): checks = { self.storage_healthcheck: {}, self._tree_healthcheck: {}, } - with reporter.step(f"Perform startup healthcheck on {cluster_node}"): - self._perform(cluster_node, checks) + self._perform(cluster_node, checks) - @wait_for_success(900, 30) + @wait_for_success(900, 30, title="Wait for storage healthcheck on {cluster_node}") def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: checks = { self._storage_healthcheck: {}, } - with reporter.step(f"Perform storage healthcheck on {cluster_node}"): - self._perform(cluster_node, checks) + self._perform(cluster_node, checks) - @wait_for_success(120, 5) + @wait_for_success(120, 5, title="Wait for service healthcheck on {cluster_node}") def services_healthcheck(self, cluster_node: ClusterNode): svcs_to_check = cluster_node.services checks = { @@ -63,8 +58,7 @@ class BasicHealthcheck(Healthcheck): self._check_services: {"services": svcs_to_check}, } - with reporter.step(f"Perform service healthcheck on {cluster_node}"): - self._perform(cluster_node, checks) + self._perform(cluster_node, checks) def _check_services(self, cluster_node: ClusterNode, services: list[ServiceClass]): for svc in services: @@ -72,14 +66,14 @@ class BasicHealthcheck(Healthcheck): if result == False: return f"Service {svc.get_service_systemctl_name()} healthcheck failed on node {cluster_node}." - @reporter.step_deco("Storage healthcheck on {cluster_node}") + @reporter.step("Storage healthcheck on {cluster_node}") def _storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: result = storage_node_healthcheck(cluster_node.storage_node) self._gather_socket_info(cluster_node) if result.health_status != "READY" or result.network_status != "ONLINE": return f"Node {cluster_node} is not healthy. Health={result.health_status}. Network={result.network_status}" - @reporter.step_deco("Tree healthcheck on {cluster_node}") + @reporter.step("Tree healthcheck on {cluster_node}") def _tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: host = cluster_node.host service_config = host.get_service_config(cluster_node.storage_node.name) @@ -102,6 +96,6 @@ class BasicHealthcheck(Healthcheck): f"Error during tree healthcheck (rc={result.return_code}): {result.stdout}. 
\n Stderr: {result.stderr}" ) - @reporter.step_deco("Gather socket info for {cluster_node}") + @reporter.step("Gather socket info for {cluster_node}") def _gather_socket_info(self, cluster_node: ClusterNode): cluster_node.host.get_shell().exec("ss -tuln | grep 8080", CommandOptions(check=False)) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index 3dedd53..92da8e0 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -8,10 +8,10 @@ from time import sleep from typing import Any from urllib.parse import urlparse +from frostfs_testlib import reporter from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType from frostfs_testlib.processes.remote_process import RemoteProcess -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.common import STORAGE_USER_NAME from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD from frostfs_testlib.shell import Shell @@ -21,7 +21,6 @@ from frostfs_testlib.testing.test_control import wait_for_success EXIT_RESULT_CODE = 0 logger = logging.getLogger("NeoLogger") -reporter = get_reporter() @dataclass @@ -102,7 +101,7 @@ class K6: self.preset_output = result.stdout.strip("\n") return self.preset_output - @reporter.step_deco("Generate K6 command") + @reporter.step("Generate K6 command") def _generate_env_variables(self) -> str: env_vars = self.load_params.get_env_vars() @@ -216,7 +215,7 @@ class K6: return self._k6_process.running() return False - @reporter.step_deco("Wait until K6 process end") + @reporter.step("Wait until K6 process end") @wait_for_success(K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout") def _wait_until_process_end(self): return self._k6_process.running() diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py index b691b02..fe39862 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -1,11 +1,6 @@ -import logging - +from frostfs_testlib import reporter from frostfs_testlib.load.load_config import LoadParams, LoadScenario from frostfs_testlib.load.load_metrics import get_metrics_object -from frostfs_testlib.reporter import get_reporter - -reporter = get_reporter() -logger = logging.getLogger("NeoLogger") class LoadVerifier: @@ -49,19 +44,11 @@ class LoadVerifier: if deleters and not delete_operations: issues.append(f"No any delete operation was performed") - if ( - write_operations - and writers - and write_errors / write_operations * 100 > self.load_params.error_threshold - ): + if write_operations and writers and write_errors / write_operations * 100 > self.load_params.error_threshold: issues.append( f"Write error rate is greater than threshold: {write_errors / write_operations * 100} > {self.load_params.error_threshold}" ) - if ( - read_operations - and readers - and read_errors / read_operations * 100 > self.load_params.error_threshold - ): + if read_operations and readers and read_errors / read_operations * 100 > self.load_params.error_threshold: issues.append( f"Read error rate is greater than threshold: {read_errors / read_operations * 100} > {self.load_params.error_threshold}" ) @@ -89,9 +76,7 @@ class LoadVerifier: ) return verify_issues - def _collect_verify_issues_on_process( - self, label, load_summary, verification_summary - ) -> list[str]: + def 
_collect_verify_issues_on_process(self, label, load_summary, verification_summary) -> list[str]: issues = [] load_metrics = get_metrics_object(self.load_params.scenario, load_summary) diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index ea5a374..f5284d8 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -9,13 +9,13 @@ from urllib.parse import urlparse import yaml +from frostfs_testlib import reporter from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner from frostfs_testlib.load.k6 import K6 from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources import optionals from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC from frostfs_testlib.resources.common import STORAGE_USER_NAME @@ -31,17 +31,15 @@ from frostfs_testlib.testing.test_control import retry from frostfs_testlib.utils import datetime_utils from frostfs_testlib.utils.file_keeper import FileKeeper -reporter = get_reporter() - class RunnerBase(ScenarioRunner): k6_instances: list[K6] - @reporter.step_deco("Run preset on loaders") + @reporter.step("Run preset on loaders") def preset(self): parallel([k6.preset for k6 in self.k6_instances]) - @reporter.step_deco("Wait until load finish") + @reporter.step("Wait until load finish") def wait_until_finish(self, soft_timeout: int = 0): parallel([k6.wait_until_finished for k6 in self.k6_instances], soft_timeout=soft_timeout) @@ -70,7 +68,7 @@ class DefaultRunner(RunnerBase): self.loaders_wallet = loaders_wallet @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Preparation steps") + @reporter.step("Preparation steps") def prepare( self, load_params: LoadParams, @@ -127,7 +125,7 @@ class DefaultRunner(RunnerBase): ] shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input)) - @reporter.step_deco("Init k6 instances") + @reporter.step("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): self.k6_instances = [] cycled_loaders = itertools.cycle(self.loaders) @@ -271,7 +269,7 @@ class LocalRunner(RunnerBase): self.nodes_under_load = nodes_under_load @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Preparation steps") + @reporter.step("Preparation steps") def prepare( self, load_params: LoadParams, @@ -298,7 +296,7 @@ class LocalRunner(RunnerBase): return True - @reporter.step_deco("Prepare node {cluster_node}") + @reporter.step("Prepare node {cluster_node}") def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams): shell = cluster_node.host.get_shell() @@ -323,7 +321,7 @@ class LocalRunner(RunnerBase): shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}') shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}") - @reporter.step_deco("Init k6 instances") + @reporter.step("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): self.k6_instances = [] futures = parallel( @@ -369,12 +367,12 @@ class LocalRunner(RunnerBase): with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"): 
time.sleep(wait_after_start_time) - @reporter.step_deco("Restore passwd on {cluster_node}") + @reporter.step("Restore passwd on {cluster_node}") def restore_passwd_on_node(self, cluster_node: ClusterNode): shell = cluster_node.host.get_shell() shell.exec("sudo chattr -i /etc/passwd") - @reporter.step_deco("Lock passwd on {cluster_node}") + @reporter.step("Lock passwd on {cluster_node}") def lock_passwd_on_node(self, cluster_node: ClusterNode): shell = cluster_node.host.get_shell() shell.exec("sudo chattr +i /etc/passwd") @@ -400,19 +398,19 @@ class S3LocalRunner(LocalRunner): endpoints: list[str] k6_dir: str - @reporter.step_deco("Run preset on loaders") + @reporter.step("Run preset on loaders") def preset(self): LocalRunner.preset(self) with reporter.step(f"Resolve containers in preset"): parallel(self._resolve_containers_in_preset, self.k6_instances) - @reporter.step_deco("Resolve containers in preset") + @reporter.step("Resolve containers in preset") def _resolve_containers_in_preset(self, k6_instance: K6): k6_instance.shell.exec( f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}" ) - @reporter.step_deco("Init k6 instances") + @reporter.step("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): self.k6_instances = [] futures = parallel( @@ -448,7 +446,7 @@ class S3LocalRunner(LocalRunner): ) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Preparation steps") + @reporter.step("Preparation steps") def prepare( self, load_params: LoadParams, @@ -464,7 +462,7 @@ class S3LocalRunner(LocalRunner): parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, s3_public_keys, grpc_peer) - @reporter.step_deco("Prepare node {cluster_node}") + @reporter.step("Prepare node {cluster_node}") def prepare_node( self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, s3_public_keys: list[str], grpc_peer: str ): diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py index d92d77a..1252b97 100644 --- a/src/frostfs_testlib/processes/remote_process.py +++ b/src/frostfs_testlib/processes/remote_process.py @@ -8,18 +8,14 @@ from tenacity import retry from tenacity.stop import stop_after_attempt from tenacity.wait import wait_fixed -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.shell import Shell from frostfs_testlib.shell.command_inspectors import SuInspector from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions -reporter = get_reporter() - class RemoteProcess: - def __init__( - self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector] - ): + def __init__(self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector]): self.process_dir = process_dir self.cmd = cmd self.stdout_last_line_number = 0 @@ -32,10 +28,8 @@ class RemoteProcess: self.cmd_inspectors: list[CommandInspector] = [cmd_inspector] if cmd_inspector else [] @classmethod - @reporter.step_deco("Create remote process") - def create( - cls, command: str, shell: Shell, working_dir: str = "/tmp", user: Optional[str] = None - ) -> RemoteProcess: + @reporter.step("Create remote process") + def create(cls, command: str, shell: Shell, working_dir: str = "/tmp", user: Optional[str] = None) -> RemoteProcess: """ Create a process on a 
remote host. Args: shell: Shell instance command: command to be run on provided host working_dir: working directory for the process user: user under which process should be run Returns: RemoteProcess instance for further examination """ remote_process = cls( cmd=command, process_dir=os.path.join(working_dir, f"proc_{uuid.uuid4()}"), shell=shell, cmd_inspector=SuInspector(user) if user else None, ) remote_process._create_process_dir() remote_process._generate_command_script(command) remote_process._start_process() remote_process.pid = remote_process._get_pid() return remote_process - @reporter.step_deco("Get process stdout") + @reporter.step("Get process stdout") def stdout(self, full: bool = False) -> str: """ Method to get process stdout, either fresh info or full. @@ -100,7 +94,7 @@ class RemoteProcess: return resulted_stdout return "" - @reporter.step_deco("Get process stderr") + @reporter.step("Get process stderr") def stderr(self, full: bool = False) -> str: """ Method to get process stderr, either fresh info or full. @@ -131,7 +125,7 @@ class RemoteProcess: return resulted_stderr return "" - @reporter.step_deco("Get process rc") + @reporter.step("Get process rc") def rc(self) -> Optional[int]: if self.proc_rc is not None: return self.proc_rc @@ -148,11 +142,11 @@ class RemoteProcess: self.proc_rc = int(terminal.stdout) return self.proc_rc - @reporter.step_deco("Check if process is running") + @reporter.step("Check if process is running") def running(self) -> bool: return self.rc() is None - @reporter.step_deco("Send signal to process") + @reporter.step("Send signal to process") def send_signal(self, signal: int) -> None: kill_res = self.shell.exec( f"kill -{signal} {self.pid}", @@ -161,27 +155,23 @@ class RemoteProcess: if "No such process" in kill_res.stderr: return if kill_res.return_code: - raise AssertionError( - f"Signal {signal} not sent. Return code of kill: {kill_res.return_code}" - ) + raise AssertionError(f"Signal {signal} not sent. Return code of kill: {kill_res.return_code}") - @reporter.step_deco("Stop process") + @reporter.step("Stop process") def stop(self) -> None: self.send_signal(15) - @reporter.step_deco("Kill process") + @reporter.step("Kill process") def kill(self) -> None: self.send_signal(9) - @reporter.step_deco("Clear process directory") + @reporter.step("Clear process directory") def clear(self) -> None: if self.process_dir == "/": raise AssertionError(f"Invalid path to delete: {self.process_dir}") - self.shell.exec( - f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors) - ) + self.shell.exec(f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) - @reporter.step_deco("Start remote process") + @reporter.step("Start remote process") def _start_process(self) -> None: self.shell.exec( f"nohup {self.process_dir}/command.sh </dev/null &", CommandOptions(extra_inspectors=self.cmd_inspectors), ) def _create_process_dir(self) -> None: - self.shell.exec( - f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors) - ) - self.shell.exec( - f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors) - ) - terminal = self.shell.exec( - f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors) - ) + self.shell.exec(f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) + self.shell.exec(f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) + terminal = self.shell.exec(f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) self.process_dir = terminal.stdout.strip() - @reporter.step_deco("Get pid") + @reporter.step("Get pid") @retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True) def _get_pid(self) -> str: - terminal = self.shell.exec( - f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors) - ) + terminal = self.shell.exec(f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors)) assert terminal.stdout, f"invalid pid: 
{terminal.stdout}" return terminal.stdout.strip() - @reporter.step_deco("Generate command script") + @reporter.step("Generate command script") def _generate_command_script(self, command: str) -> None: command = command.replace('"', '\\"').replace("\\", "\\\\") script = ( diff --git a/src/frostfs_testlib/reporter/__init__.py b/src/frostfs_testlib/reporter/__init__.py index e2c113c..848c175 100644 --- a/src/frostfs_testlib/reporter/__init__.py +++ b/src/frostfs_testlib/reporter/__init__.py @@ -1,3 +1,5 @@ +from typing import Any + from frostfs_testlib.reporter.allure_handler import AllureHandler from frostfs_testlib.reporter.interfaces import ReporterHandler from frostfs_testlib.reporter.reporter import Reporter @@ -20,3 +22,7 @@ def get_reporter() -> Reporter: def step(title: str): return __reporter.step(title) + + +def attach(content: Any, file_name: str): + return __reporter.attach(content, file_name) diff --git a/src/frostfs_testlib/reporter/allure_handler.py b/src/frostfs_testlib/reporter/allure_handler.py index 9089f98..ef63638 100644 --- a/src/frostfs_testlib/reporter/allure_handler.py +++ b/src/frostfs_testlib/reporter/allure_handler.py @@ -21,9 +21,14 @@ class AllureHandler(ReporterHandler): def attach(self, body: Any, file_name: str) -> None: attachment_name, extension = os.path.splitext(file_name) + if extension.startswith("."): + extension = extension[1:] attachment_type = self._resolve_attachment_type(extension) - allure.attach(body, attachment_name, attachment_type, extension) + if os.path.exists(body): + allure.attach.file(body, file_name, attachment_type, extension) + else: + allure.attach(body, attachment_name, attachment_type, extension) def _resolve_attachment_type(self, extension: str) -> attachment_type: """Try to find matching Allure attachment type by extension. 
diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 059e949..e4f2bb2 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -6,7 +6,7 @@ from datetime import datetime from time import sleep from typing import Literal, Optional, Union -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.shell import CommandOptions @@ -15,7 +15,6 @@ from frostfs_testlib.shell.local_shell import LocalShell # TODO: Refactor this code to use shell instead of _cmd_run from frostfs_testlib.utils.cli_utils import _configure_aws_cli -reporter = get_reporter() logger = logging.getLogger("NeoLogger") command_options = CommandOptions(timeout=480) @@ -28,8 +27,10 @@ class AwsCliClient(S3ClientWrapper): common_flags = "--no-verify-ssl --no-paginate" s3gate_endpoint: str - @reporter.step_deco("Configure S3 client (aws cli)") - def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str='default') -> None: + @reporter.step("Configure S3 client (aws cli)") + def __init__( + self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default" + ) -> None: self.s3gate_endpoint = s3gate_endpoint self.profile = profile self.local_shell = LocalShell() @@ -42,11 +43,11 @@ class AwsCliClient(S3ClientWrapper): except Exception as err: raise RuntimeError("Error while configuring AwsCliClient") from err - @reporter.step_deco("Set endpoint S3 to {s3gate_endpoint}") + @reporter.step("Set endpoint S3 to {s3gate_endpoint}") def set_endpoint(self, s3gate_endpoint: str): self.s3gate_endpoint = s3gate_endpoint - @reporter.step_deco("Create bucket S3") + @reporter.step("Create bucket S3") def create_bucket( self, bucket: Optional[str] = None, @@ -85,25 +86,25 @@ class AwsCliClient(S3ClientWrapper): return bucket - @reporter.step_deco("List buckets S3") + @reporter.step("List buckets S3") def list_buckets(self) -> list[str]: cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout buckets_json = self._to_json(output) return [bucket["Name"] for bucket in buckets_json["Buckets"]] - @reporter.step_deco("Delete bucket S3") + @reporter.step("Delete bucket S3") def delete_bucket(self, bucket: str) -> None: cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd, command_options) sleep(S3_SYNC_WAIT_TIME) - @reporter.step_deco("Head bucket S3") + @reporter.step("Head bucket S3") def head_bucket(self, bucket: str) -> None: cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd) - @reporter.step_deco("Put bucket versioning status") + @reporter.step("Put bucket versioning status") def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: cmd = ( f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} " @@ -112,7 +113,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Get bucket versioning status") + @reporter.step("Get bucket versioning status") def 
get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: cmd = ( f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} " @@ -122,7 +123,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("Status") - @reporter.step_deco("Put bucket tagging") + @reporter.step("Put bucket tagging") def put_bucket_tagging(self, bucket: str, tags: list) -> None: tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]} cmd = ( @@ -131,34 +132,42 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Get bucket tagging") + @reporter.step("Get bucket tagging") def get_bucket_tagging(self, bucket: str) -> list: cmd = ( - f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("TagSet") - @reporter.step_deco("Get bucket acl") + @reporter.step("Get bucket acl") def get_bucket_acl(self, bucket: str) -> list: - cmd = f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Grants") - @reporter.step_deco("Get bucket location") + @reporter.step("Get bucket location") def get_bucket_location(self, bucket: str) -> dict: cmd = ( - f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("LocationConstraint") - @reporter.step_deco("List objects S3") + @reporter.step("List objects S3") def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api list-objects --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -167,9 +176,12 @@ class AwsCliClient(S3ClientWrapper): return response if full_output else obj_list - @reporter.step_deco("List objects S3 v2") + @reporter.step("List objects S3 v2") def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -178,7 +190,7 @@ class AwsCliClient(S3ClientWrapper): return response if full_output else obj_list - @reporter.step_deco("List objects versions S3") + @reporter.step("List objects versions 
S3") def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: cmd = ( f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " @@ -188,7 +200,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response if full_output else response.get("Versions", []) - @reporter.step_deco("List objects delete markers S3") + @reporter.step("List objects delete markers S3") def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: cmd = ( f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " @@ -198,7 +210,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response if full_output else response.get("DeleteMarkers", []) - @reporter.step_deco("Copy object S3") + @reporter.step("Copy object S3") def copy_object( self, source_bucket: str, @@ -236,7 +248,7 @@ class AwsCliClient(S3ClientWrapper): self.local_shell.exec(cmd, command_options) return key - @reporter.step_deco("Put object S3") + @reporter.step("Put object S3") def put_object( self, bucket: str, @@ -280,7 +292,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("VersionId") - @reporter.step_deco("Head object S3") + @reporter.step("Head object S3") def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: version = f" --version-id {version_id}" if version_id else "" cmd = ( @@ -291,7 +303,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response - @reporter.step_deco("Get object S3") + @reporter.step("Get object S3") def get_object( self, bucket: str, @@ -312,7 +324,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response if full_output else file_path - @reporter.step_deco("Get object ACL") + @reporter.step("Get object ACL") def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: version = f" --version-id {version_id}" if version_id else "" cmd = ( @@ -323,7 +335,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("Grants") - @reporter.step_deco("Put object ACL") + @reporter.step("Put object ACL") def put_object_acl( self, bucket: str, @@ -346,7 +358,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("Grants") - @reporter.step_deco("Put bucket ACL") + @reporter.step("Put bucket ACL") def put_bucket_acl( self, bucket: str, @@ -354,7 +366,10 @@ class AwsCliClient(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> None: - cmd = f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " + f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) if acl: cmd += f" --acl {acl}" if grant_write: @@ -363,7 +378,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --grant-read {grant_read}" self.local_shell.exec(cmd) - @reporter.step_deco("Delete objects S3") + @reporter.step("Delete objects S3") def delete_objects(self, bucket: str, keys: list[str]) -> dict: file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") delete_structure = json.dumps(_make_objs_dict(keys)) @@ -380,7 +395,7 @@ class AwsCliClient(S3ClientWrapper): sleep(S3_SYNC_WAIT_TIME) return response - @reporter.step_deco("Delete object S3") + @reporter.step("Delete object S3") def delete_object(self, 
bucket: str, key: str, version_id: Optional[str] = None) -> dict: version = f" --version-id {version_id}" if version_id else "" cmd = ( @@ -391,7 +406,7 @@ class AwsCliClient(S3ClientWrapper): sleep(S3_SYNC_WAIT_TIME) return self._to_json(output) - @reporter.step_deco("Delete object versions S3") + @reporter.step("Delete object versions S3") def delete_object_versions(self, bucket: str, object_versions: list) -> dict: # Build deletion list in S3 format delete_list = { @@ -418,13 +433,13 @@ class AwsCliClient(S3ClientWrapper): sleep(S3_SYNC_WAIT_TIME) return self._to_json(output) - @reporter.step_deco("Delete object versions S3 without delete markers") + @reporter.step("Delete object versions S3 without delete markers") def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: # Delete objects without creating delete markers for object_version in object_versions: self.delete_object(bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"]) - @reporter.step_deco("Get object attributes") + @reporter.step("Get object attributes") def get_object_attributes( self, bucket: str, @@ -456,14 +471,17 @@ class AwsCliClient(S3ClientWrapper): else: return response.get(attributes[0]) - @reporter.step_deco("Get bucket policy") + @reporter.step("Get bucket policy") def get_bucket_policy(self, bucket: str) -> dict: - cmd = f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Policy") - @reporter.step_deco("Put bucket policy") + @reporter.step("Put bucket policy") def put_bucket_policy(self, bucket: str, policy: dict) -> None: # Leaving it as is was in test repo. 
Double dumps to escape resulting string # Example: @@ -478,14 +496,17 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Get bucket cors") + @reporter.step("Get bucket cors") def get_bucket_cors(self, bucket: str) -> dict: - cmd = f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("CORSRules") - @reporter.step_deco("Put bucket cors") + @reporter.step("Put bucket cors") def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: cmd = ( f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " @@ -493,14 +514,15 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Delete bucket cors") + @reporter.step("Delete bucket cors") def delete_bucket_cors(self, bucket: str) -> None: cmd = ( - f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) - @reporter.step_deco("Delete bucket tagging") + @reporter.step("Delete bucket tagging") def delete_bucket_tagging(self, bucket: str) -> None: cmd = ( f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " @@ -508,7 +530,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Put object retention") + @reporter.step("Put object retention") def put_object_retention( self, bucket: str, @@ -526,7 +548,7 @@ class AwsCliClient(S3ClientWrapper): cmd += " --bypass-governance-retention" self.local_shell.exec(cmd) - @reporter.step_deco("Put object legal hold") + @reporter.step("Put object legal hold") def put_object_legal_hold( self, bucket: str, @@ -542,7 +564,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Put object tagging") + @reporter.step("Put object tagging") def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} @@ -552,7 +574,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Get object tagging") + @reporter.step("Get object tagging") def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: version = f" --version-id {version_id}" if version_id else "" cmd = ( @@ -563,7 +585,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("TagSet") - @reporter.step_deco("Delete object tagging") + @reporter.step("Delete object tagging") def delete_object_tagging(self, bucket: str, key: str) -> None: cmd = ( f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " @@ -571,7 +593,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Sync directory S3") + @reporter.step("Sync directory S3") def sync( self, bucket: str, @@ -579,7 +601,10 @@ class AwsCliClient(S3ClientWrapper): acl: Optional[str] = None, metadata: Optional[dict] = None, ) -> dict: - cmd = f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " 
f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) if metadata: cmd += " --metadata" for key, value in metadata.items(): @@ -589,7 +614,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd, command_options).stdout return self._to_json(output) - @reporter.step_deco("CP directory S3") + @reporter.step("CP directory S3") def cp( self, bucket: str, @@ -610,7 +635,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd, command_options).stdout return self._to_json(output) - @reporter.step_deco("Create multipart upload S3") + @reporter.step("Create multipart upload S3") def create_multipart_upload(self, bucket: str, key: str) -> str: cmd = ( f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " @@ -623,7 +648,7 @@ class AwsCliClient(S3ClientWrapper): return response["UploadId"] - @reporter.step_deco("List multipart uploads S3") + @reporter.step("List multipart uploads S3") def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: cmd = ( f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " @@ -633,7 +658,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("Uploads") - @reporter.step_deco("Abort multipart upload S3") + @reporter.step("Abort multipart upload S3") def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: cmd = ( f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} " @@ -641,7 +666,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Upload part S3") + @reporter.step("Upload part S3") def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: cmd = ( f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} " @@ -653,7 +678,7 @@ class AwsCliClient(S3ClientWrapper): assert response.get("ETag"), f"Expected ETag in response:\n{response}" return response["ETag"] - @reporter.step_deco("Upload copy part S3") + @reporter.step("Upload copy part S3") def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: cmd = ( f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} " @@ -666,7 +691,7 @@ class AwsCliClient(S3ClientWrapper): return response["CopyPartResult"]["ETag"] - @reporter.step_deco("List parts S3") + @reporter.step("List parts S3") def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: cmd = ( f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " @@ -679,7 +704,7 @@ class AwsCliClient(S3ClientWrapper): return response["Parts"] - @reporter.step_deco("Complete multipart upload S3") + @reporter.step("Complete multipart upload S3") def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: file_path = os.path.join(os.getcwd(), ASSETS_DIR, "parts.json") parts_dict = {"Parts": [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]} @@ -696,7 +721,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Put object lock configuration") + @reporter.step("Put object lock configuration") def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: cmd = ( f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " 
@@ -705,7 +730,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout return self._to_json(output) - @reporter.step_deco("Get object lock configuration") + @reporter.step("Get object lock configuration") def get_object_lock_configuration(self, bucket: str): cmd = ( f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index ba3716a..bdb177e 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -13,17 +13,11 @@ from botocore.config import Config from botocore.exceptions import ClientError from mypy_boto3_s3 import S3Client -from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.resources.common import ( - ASSETS_DIR, - MAX_REQUEST_ATTEMPTS, - RETRY_MODE, - S3_SYNC_WAIT_TIME, -) +from frostfs_testlib import reporter +from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.utils.cli_utils import log_command_execution -reporter = get_reporter() logger = logging.getLogger("NeoLogger") # Disable warnings on self-signed certificate which the @@ -46,9 +40,11 @@ def report_error(func): class Boto3ClientWrapper(S3ClientWrapper): __repr_name__: str = "Boto3 client" - @reporter.step_deco("Configure S3 client (boto3)") + @reporter.step("Configure S3 client (boto3)") @report_error - def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str='default') -> None: + def __init__( + self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default" + ) -> None: self.boto3_client: S3Client = None self.session = boto3.Session(profile_name=profile) self.config = Config( @@ -62,7 +58,7 @@ class Boto3ClientWrapper(S3ClientWrapper): self.s3gate_endpoint: str = "" self.set_endpoint(s3gate_endpoint) - @reporter.step_deco("Set endpoint S3 to {s3gate_endpoint}") + @reporter.step("Set endpoint S3 to {s3gate_endpoint}") def set_endpoint(self, s3gate_endpoint: str): if self.s3gate_endpoint == s3gate_endpoint: return @@ -90,7 +86,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return result # BUCKET METHODS # - @reporter.step_deco("Create bucket S3") + @reporter.step("Create bucket S3") @report_error def create_bucket( self, @@ -118,16 +114,14 @@ class Boto3ClientWrapper(S3ClientWrapper): elif grant_full_control: params.update({"GrantFullControl": grant_full_control}) if location_constraint: - params.update( - {"CreateBucketConfiguration": {"LocationConstraint": location_constraint}} - ) + params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) s3_bucket = self.boto3_client.create_bucket(**params) log_command_execution(f"Created S3 bucket {bucket}", s3_bucket) sleep(S3_SYNC_WAIT_TIME) return bucket - @reporter.step_deco("List buckets S3") + @reporter.step("List buckets S3") @report_error def list_buckets(self) -> list[str]: found_buckets = [] @@ -140,20 +134,20 @@ class Boto3ClientWrapper(S3ClientWrapper): return found_buckets - @reporter.step_deco("Delete bucket S3") + @reporter.step("Delete bucket S3") @report_error def delete_bucket(self, bucket: str) -> None: response = self.boto3_client.delete_bucket(Bucket=bucket) log_command_execution("S3 Delete bucket result", response) sleep(S3_SYNC_WAIT_TIME) - @reporter.step_deco("Head bucket S3") + 
@reporter.step("Head bucket S3") @report_error def head_bucket(self, bucket: str) -> None: response = self.boto3_client.head_bucket(Bucket=bucket) log_command_execution("S3 Head bucket result", response) - @reporter.step_deco("Put bucket versioning status") + @reporter.step("Put bucket versioning status") @report_error def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: response = self.boto3_client.put_bucket_versioning( @@ -161,7 +155,7 @@ class Boto3ClientWrapper(S3ClientWrapper): ) log_command_execution("S3 Set bucket versioning to", response) - @reporter.step_deco("Get bucket versioning status") + @reporter.step("Get bucket versioning status") @report_error def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: response = self.boto3_client.get_bucket_versioning(Bucket=bucket) @@ -169,7 +163,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Got bucket versioning status", response) return status - @reporter.step_deco("Put bucket tagging") + @reporter.step("Put bucket tagging") @report_error def put_bucket_tagging(self, bucket: str, tags: list) -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] @@ -177,27 +171,27 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.put_bucket_tagging(Bucket=bucket, Tagging=tagging) log_command_execution("S3 Put bucket tagging", response) - @reporter.step_deco("Get bucket tagging") + @reporter.step("Get bucket tagging") @report_error def get_bucket_tagging(self, bucket: str) -> list: response = self.boto3_client.get_bucket_tagging(Bucket=bucket) log_command_execution("S3 Get bucket tagging", response) return response.get("TagSet") - @reporter.step_deco("Get bucket acl") + @reporter.step("Get bucket acl") @report_error def get_bucket_acl(self, bucket: str) -> list: response = self.boto3_client.get_bucket_acl(Bucket=bucket) log_command_execution("S3 Get bucket acl", response) return response.get("Grants") - @reporter.step_deco("Delete bucket tagging") + @reporter.step("Delete bucket tagging") @report_error def delete_bucket_tagging(self, bucket: str) -> None: response = self.boto3_client.delete_bucket_tagging(Bucket=bucket) log_command_execution("S3 Delete bucket tagging", response) - @reporter.step_deco("Put bucket ACL") + @reporter.step("Put bucket ACL") @report_error def put_bucket_acl( self, @@ -214,60 +208,56 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.put_bucket_acl(**params) log_command_execution("S3 ACL bucket result", response) - @reporter.step_deco("Put object lock configuration") + @reporter.step("Put object lock configuration") @report_error def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: - response = self.boto3_client.put_object_lock_configuration( - Bucket=bucket, ObjectLockConfiguration=configuration - ) + response = self.boto3_client.put_object_lock_configuration(Bucket=bucket, ObjectLockConfiguration=configuration) log_command_execution("S3 put_object_lock_configuration result", response) return response - @reporter.step_deco("Get object lock configuration") + @reporter.step("Get object lock configuration") @report_error def get_object_lock_configuration(self, bucket: str) -> dict: response = self.boto3_client.get_object_lock_configuration(Bucket=bucket) log_command_execution("S3 get_object_lock_configuration result", response) return response.get("ObjectLockConfiguration") - @reporter.step_deco("Get bucket policy") + @reporter.step("Get 
bucket policy") @report_error def get_bucket_policy(self, bucket: str) -> str: response = self.boto3_client.get_bucket_policy(Bucket=bucket) log_command_execution("S3 get_bucket_policy result", response) return response.get("Policy") - @reporter.step_deco("Put bucket policy") + @reporter.step("Put bucket policy") @report_error def put_bucket_policy(self, bucket: str, policy: dict) -> None: response = self.boto3_client.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy)) log_command_execution("S3 put_bucket_policy result", response) return response - @reporter.step_deco("Get bucket cors") + @reporter.step("Get bucket cors") @report_error def get_bucket_cors(self, bucket: str) -> dict: response = self.boto3_client.get_bucket_cors(Bucket=bucket) log_command_execution("S3 get_bucket_cors result", response) return response.get("CORSRules") - @reporter.step_deco("Get bucket location") + @reporter.step("Get bucket location") @report_error def get_bucket_location(self, bucket: str) -> str: response = self.boto3_client.get_bucket_location(Bucket=bucket) log_command_execution("S3 get_bucket_location result", response) return response.get("LocationConstraint") - @reporter.step_deco("Put bucket cors") + @reporter.step("Put bucket cors") @report_error def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: - response = self.boto3_client.put_bucket_cors( - Bucket=bucket, CORSConfiguration=cors_configuration - ) + response = self.boto3_client.put_bucket_cors(Bucket=bucket, CORSConfiguration=cors_configuration) log_command_execution("S3 put_bucket_cors result", response) return response - @reporter.step_deco("Delete bucket cors") + @reporter.step("Delete bucket cors") @report_error def delete_bucket_cors(self, bucket: str) -> None: response = self.boto3_client.delete_bucket_cors(Bucket=bucket) @@ -276,7 +266,7 @@ class Boto3ClientWrapper(S3ClientWrapper): # END OF BUCKET METHODS # # OBJECT METHODS # - @reporter.step_deco("List objects S3 v2") + @reporter.step("List objects S3 v2") @report_error def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: response = self.boto3_client.list_objects_v2(Bucket=bucket) @@ -287,7 +277,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response if full_output else obj_list - @reporter.step_deco("List objects S3") + @reporter.step("List objects S3") @report_error def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: response = self.boto3_client.list_objects(Bucket=bucket) @@ -298,21 +288,21 @@ class Boto3ClientWrapper(S3ClientWrapper): return response if full_output else obj_list - @reporter.step_deco("List objects versions S3") + @reporter.step("List objects versions S3") @report_error def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: response = self.boto3_client.list_object_versions(Bucket=bucket) log_command_execution("S3 List objects versions result", response) return response if full_output else response.get("Versions", []) - @reporter.step_deco("List objects delete markers S3") + @reporter.step("List objects delete markers S3") @report_error def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: response = self.boto3_client.list_object_versions(Bucket=bucket) log_command_execution("S3 List objects delete markers result", response) return response if full_output else response.get("DeleteMarkers", []) - @reporter.step_deco("Put object S3") + @reporter.step("Put object S3") @report_error def put_object( self, @@ 
-343,7 +333,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Put object result", response) return response.get("VersionId") - @reporter.step_deco("Head object S3") + @reporter.step("Head object S3") @report_error def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: params = { @@ -355,7 +345,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Head object result", response) return response - @reporter.step_deco("Delete object S3") + @reporter.step("Delete object S3") @report_error def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: params = { @@ -368,7 +358,7 @@ class Boto3ClientWrapper(S3ClientWrapper): sleep(S3_SYNC_WAIT_TIME) return response - @reporter.step_deco("Delete objects S3") + @reporter.step("Delete objects S3") @report_error def delete_objects(self, bucket: str, keys: list[str]) -> dict: response = self.boto3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(keys)) @@ -379,7 +369,7 @@ class Boto3ClientWrapper(S3ClientWrapper): sleep(S3_SYNC_WAIT_TIME) return response - @reporter.step_deco("Delete object versions S3") + @reporter.step("Delete object versions S3") @report_error def delete_object_versions(self, bucket: str, object_versions: list) -> dict: # Build deletion list in S3 format @@ -396,7 +386,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Delete objects result", response) return response - @reporter.step_deco("Delete object versions S3 without delete markers") + @reporter.step("Delete object versions S3 without delete markers") @report_error def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: # Delete objects without creating delete markers @@ -406,7 +396,7 @@ class Boto3ClientWrapper(S3ClientWrapper): ) log_command_execution("S3 Delete object result", response) - @reporter.step_deco("Put object ACL") + @reporter.step("Put object ACL") @report_error def put_object_acl( self, @@ -419,7 +409,7 @@ class Boto3ClientWrapper(S3ClientWrapper): # pytest.skip("Method put_object_acl is not supported by boto3 client") raise NotImplementedError("Unsupported for boto3 client") - @reporter.step_deco("Get object ACL") + @reporter.step("Get object ACL") @report_error def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: params = { @@ -431,7 +421,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 ACL objects result", response) return response.get("Grants") - @reporter.step_deco("Copy object S3") + @reporter.step("Copy object S3") @report_error def copy_object( self, @@ -460,7 +450,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Copy objects result", response) return key - @reporter.step_deco("Get object S3") + @reporter.step("Get object S3") @report_error def get_object( self, @@ -478,8 +468,7 @@ class Boto3ClientWrapper(S3ClientWrapper): params = { self._to_s3_param(param): value for param, value in {**locals(), **{"Range": range_str}}.items() - if param not in ["self", "object_range", "full_output", "range_str", "filename"] - and value is not None + if param not in ["self", "object_range", "full_output", "range_str", "filename"] and value is not None } response = self.boto3_client.get_object(**params) log_command_execution("S3 Get objects result", response) @@ -491,7 +480,7 @@ class Boto3ClientWrapper(S3ClientWrapper): chunk = response["Body"].read(1024) return response if full_output else filename - 
@reporter.step_deco("Create multipart upload S3") + @reporter.step("Create multipart upload S3") @report_error def create_multipart_upload(self, bucket: str, key: str) -> str: response = self.boto3_client.create_multipart_upload(Bucket=bucket, Key=key) @@ -500,7 +489,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response["UploadId"] - @reporter.step_deco("List multipart uploads S3") + @reporter.step("List multipart uploads S3") @report_error def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: response = self.boto3_client.list_multipart_uploads(Bucket=bucket) @@ -508,19 +497,15 @@ class Boto3ClientWrapper(S3ClientWrapper): return response.get("Uploads") - @reporter.step_deco("Abort multipart upload S3") + @reporter.step("Abort multipart upload S3") @report_error def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: - response = self.boto3_client.abort_multipart_upload( - Bucket=bucket, Key=key, UploadId=upload_id - ) + response = self.boto3_client.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id) log_command_execution("S3 Abort multipart upload", response) - @reporter.step_deco("Upload part S3") + @reporter.step("Upload part S3") @report_error - def upload_part( - self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str - ) -> str: + def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: with open(filepath, "rb") as put_file: body = put_file.read() @@ -536,11 +521,9 @@ class Boto3ClientWrapper(S3ClientWrapper): return response["ETag"] - @reporter.step_deco("Upload copy part S3") + @reporter.step("Upload copy part S3") @report_error - def upload_part_copy( - self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str - ) -> str: + def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: response = self.boto3_client.upload_part_copy( UploadId=upload_id, Bucket=bucket, @@ -549,13 +532,11 @@ class Boto3ClientWrapper(S3ClientWrapper): CopySource=copy_source, ) log_command_execution("S3 Upload copy part", response) - assert response.get("CopyPartResult", []).get( - "ETag" - ), f"Expected ETag in response:\n{response}" + assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" return response["CopyPartResult"]["ETag"] - @reporter.step_deco("List parts S3") + @reporter.step("List parts S3") @report_error def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: response = self.boto3_client.list_parts(UploadId=upload_id, Bucket=bucket, Key=key) @@ -564,7 +545,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response["Parts"] - @reporter.step_deco("Complete multipart upload S3") + @reporter.step("Complete multipart upload S3") @report_error def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] @@ -573,7 +554,7 @@ class Boto3ClientWrapper(S3ClientWrapper): ) log_command_execution("S3 Complete multipart upload", response) - @reporter.step_deco("Put object retention") + @reporter.step("Put object retention") @report_error def put_object_retention( self, @@ -591,7 +572,7 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.put_object_retention(**params) log_command_execution("S3 Put object retention ", response) - @reporter.step_deco("Put object legal hold") + @reporter.step("Put object legal hold") 
@report_error def put_object_legal_hold( self, @@ -609,7 +590,7 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.put_object_legal_hold(**params) log_command_execution("S3 Put object legal hold ", response) - @reporter.step_deco("Put object tagging") + @reporter.step("Put object tagging") @report_error def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] @@ -617,7 +598,7 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging) log_command_execution("S3 Put object tagging", response) - @reporter.step_deco("Get object tagging") + @reporter.step("Get object tagging") @report_error def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: params = { @@ -629,13 +610,13 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Get object tagging", response) return response.get("TagSet") - @reporter.step_deco("Delete object tagging") + @reporter.step("Delete object tagging") @report_error def delete_object_tagging(self, bucket: str, key: str) -> None: response = self.boto3_client.delete_object_tagging(Bucket=bucket, Key=key) log_command_execution("S3 Delete object tagging", response) - @reporter.step_deco("Get object attributes") + @reporter.step("Get object attributes") @report_error def get_object_attributes( self, @@ -650,7 +631,7 @@ class Boto3ClientWrapper(S3ClientWrapper): logger.warning("Method get_object_attributes is not supported by boto3 client") return {} - @reporter.step_deco("Sync directory S3") + @reporter.step("Sync directory S3") @report_error def sync( self, @@ -661,7 +642,7 @@ class Boto3ClientWrapper(S3ClientWrapper): ) -> dict: raise NotImplementedError("Sync is not supported for boto3 client") - @reporter.step_deco("CP directory S3") + @reporter.step("CP directory S3") @report_error def cp( self, diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index 26c7e9b..acf01ff 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -6,11 +6,10 @@ from typing import IO, Optional import pexpect -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell logger = logging.getLogger("frostfs.testlib.shell") -reporter = get_reporter() class LocalShell(Shell): diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index 6b12f81..a7e6e1d 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -9,11 +9,10 @@ from typing import ClassVar, Optional, Tuple from paramiko import AutoAddPolicy, Channel, ECDSAKey, Ed25519Key, PKey, RSAKey, SSHClient, SSHException, ssh_exception from paramiko.ssh_exception import AuthenticationException -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell, SshCredentials logger = logging.getLogger("frostfs.testlib.shell") -reporter = get_reporter() class SshConnectionProvider: diff --git a/src/frostfs_testlib/steps/acl.py b/src/frostfs_testlib/steps/acl.py index 0ef101b..e97e4ee 100644 --- a/src/frostfs_testlib/steps/acl.py +++ b/src/frostfs_testlib/steps/acl.py @@ -8,8 +8,8 @@ from 
typing import List, Optional, Union

 import base58

+from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
-from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
 from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
 from frostfs_testlib.shell import Shell
@@ -22,11 +22,10 @@ from frostfs_testlib.storage.dataclasses.acl import (
 )
 from frostfs_testlib.utils import wallet_utils

-reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")


-@reporter.step_deco("Get extended ACL")
+@reporter.step("Get extended ACL")
 def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]:
     cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
     try:
@@ -40,7 +39,7 @@ def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optiona
     return result.stdout


-@reporter.step_deco("Set extended ACL")
+@reporter.step("Set extended ACL")
 def set_eacl(
     wallet_path: str,
     cid: str,
@@ -165,24 +164,20 @@ def eacl_rules(access: str, verbs: list, user: str) -> list[str]:
     return rules


-def sign_bearer(
-    shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool
-) -> None:
-    frostfscli = FrostfsCli(
-        shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
-    )
+def sign_bearer(shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None:
+    frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG)
     frostfscli.util.sign_bearer_token(
         wallet=wallet_path, from_file=eacl_rules_file_from, to_file=eacl_rules_file_to, json=json
     )


-@reporter.step_deco("Wait for eACL cache expired")
+@reporter.step("Wait for eACL cache to expire")
 def wait_for_cache_expired():
     sleep(FROSTFS_CONTRACT_CACHE_TIMEOUT)
     return


-@reporter.step_deco("Return bearer token in base64 to caller")
+@reporter.step("Return bearer token in base64 to caller")
 def bearer_token_base64_from_file(
     bearer_path: str,
 ) -> str:
diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py
index 74f445a..be96138 100644
--- a/src/frostfs_testlib/steps/cli/container.py
+++ b/src/frostfs_testlib/steps/cli/container.py
@@ -5,8 +5,8 @@ from dataclasses import dataclass
 from time import sleep
 from typing import Optional, Union

+from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
-from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
 from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
 from frostfs_testlib.shell import Shell
@@ -17,7 +17,6 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.utils import json_utils
 from frostfs_testlib.utils.file_utils import generate_file, get_file_hash

-reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")


@@ -47,7 +46,7 @@ class StorageContainer:
     def get_wallet_config_path(self) -> str:
         return self.storage_container_info.wallet_file.config_path

-    @reporter.step_deco("Generate new object and put in container")
+    @reporter.step("Generate new object and put in container")
     def generate_object(
         self,
         size: int,
@@ -103,7 +102,7 @@ SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X"
 REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X"

-@reporter.step_deco("Create Container")
+@reporter.step("Create 
Container") def create_container( wallet: str, shell: Shell, @@ -178,9 +177,7 @@ def wait_for_container_creation( return logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue") sleep(sleep_interval) - raise RuntimeError( - f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting" - ) + raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting") def wait_for_container_deletion( @@ -198,7 +195,7 @@ def wait_for_container_deletion( raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.") -@reporter.step_deco("List Containers") +@reporter.step("List Containers") def list_containers( wallet: str, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT ) -> list[str]: @@ -219,7 +216,7 @@ def list_containers( return result.stdout.split() -@reporter.step_deco("List Objects in container") +@reporter.step("List Objects in container") def list_objects( wallet: str, shell: Shell, @@ -240,14 +237,12 @@ def list_objects( (list): list of containers """ cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) - result = cli.container.list_objects( - rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout - ) + result = cli.container.list_objects(rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout) logger.info(f"Container objects: \n{result}") return result.stdout.split() -@reporter.step_deco("Get Container") +@reporter.step("Get Container") def get_container( wallet: str, cid: str, @@ -271,9 +266,7 @@ def get_container( """ cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) - result = cli.container.get( - rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout - ) + result = cli.container.get(rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout) if not json_mode: return result.stdout @@ -287,7 +280,7 @@ def get_container( return container_info -@reporter.step_deco("Delete Container") +@reporter.step("Delete Container") # TODO: make the error message about a non-found container more user-friendly def delete_container( wallet: str, @@ -350,7 +343,7 @@ def _parse_cid(output: str) -> str: return splitted[1] -@reporter.step_deco("Search container by name") +@reporter.step("Search container by name") def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str): list_cids = list_containers(wallet, shell, endpoint) for cid in list_cids: @@ -360,7 +353,7 @@ def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str return None -@reporter.step_deco("Search for nodes with a container") +@reporter.step("Search for nodes with a container") def search_nodes_with_container( wallet: str, cid: str, @@ -370,9 +363,7 @@ def search_nodes_with_container( timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> list[ClusterNode]: cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) - result = cli.container.search_node( - rpc_endpoint=endpoint, wallet=wallet, cid=cid, timeout=timeout - ) + result = cli.container.search_node(rpc_endpoint=endpoint, wallet=wallet, cid=cid, timeout=timeout) pattern = r"[0-9]+(?:\.[0-9]+){3}" nodes_ip = list(set(re.findall(pattern, result.stdout))) diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 9c7c694..803524a 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ 
-5,9 +5,9 @@ import re
 import uuid
 from typing import Any, Optional

+from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
 from frostfs_testlib.cli.neogo import NeoGo
-from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
 from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
 from frostfs_testlib.shell import Shell
@@ -16,10 +16,9 @@ from frostfs_testlib.utils import json_utils
 from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output

 logger = logging.getLogger("NeoLogger")
-reporter = get_reporter()


-@reporter.step_deco("Get object from random node")
+@reporter.step("Get object from random node")
 def get_object_from_random_node(
     wallet: str,
     cid: str,
@@ -70,7 +69,7 @@ def get_object_from_random_node(
     )


-@reporter.step_deco("Get object from {endpoint}")
+@reporter.step("Get object from {endpoint}")
 def get_object(
     wallet: str,
     cid: str,
@@ -126,7 +125,7 @@ def get_object(
     return file_path


-@reporter.step_deco("Get Range Hash from {endpoint}")
+@reporter.step("Get Range Hash from {endpoint}")
 def get_range_hash(
     wallet: str,
     cid: str,
@@ -176,7 +175,7 @@ def get_range_hash(
     return result.stdout.split(":")[1].strip()


-@reporter.step_deco("Put object to random node")
+@reporter.step("Put object to random node")
 def put_object_to_random_node(
     wallet: str,
     path: str,
@@ -235,7 +234,7 @@ def put_object_to_random_node(
     )


-@reporter.step_deco("Put object at {endpoint} in container {cid}")
+@reporter.step("Put object at {endpoint} in container {cid}")
 def put_object(
     wallet: str,
     path: str,
@@ -296,7 +295,7 @@ def put_object(
     return oid.strip()


-@reporter.step_deco("Delete object {cid}/{oid} from {endpoint}")
+@reporter.step("Delete object {cid}/{oid} from {endpoint}")
 def delete_object(
     wallet: str,
     cid: str,
@@ -344,7 +343,7 @@ def delete_object(
     return tombstone.strip()


-@reporter.step_deco("Get Range")
+@reporter.step("Get Range")
 def get_range(
     wallet: str,
     cid: str,
@@ -397,7 +396,7 @@ def get_range(
     return range_file_path, content


-@reporter.step_deco("Lock Object")
+@reporter.step("Lock Object")
 def lock_object(
     wallet: str,
     cid: str,
@@ -458,7 +457,7 @@ def lock_object(
     return oid.strip()


-@reporter.step_deco("Search object")
+@reporter.step("Search object")
 def search_object(
     wallet: str,
     cid: str,
@@ -503,9 +502,7 @@ def search_object(
         cid=cid,
         bearer=bearer,
         xhdr=xhdr,
-        filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()]
-        if filters
-        else None,
+        filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None,
         session=session,
         phy=phy,
         root=root,
@@ -517,19 +514,17 @@ def search_object(
     if expected_objects_list:
         if sorted(found_objects) == sorted(expected_objects_list):
             logger.info(
-                f"Found objects list '{found_objects}' "
-                f"is equal for expected list '{expected_objects_list}'"
+                f"Found objects list '{found_objects}' " f"is equal to expected list '{expected_objects_list}'"
             )
         else:
             logger.warning(
-                f"Found object list {found_objects} "
-                f"is not equal to expected list '{expected_objects_list}'"
+                f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'"
             )

     return found_objects


-@reporter.step_deco("Get netmap netinfo")
+@reporter.step("Get netmap netinfo")
 def get_netmap_netinfo(
     wallet: str,
     shell: Shell,
@@ -581,7 +576,7 @@ def get_netmap_netinfo(
     return settings


-@reporter.step_deco("Head 
object") +@reporter.step("Head object") def head_object( wallet: str, cid: str, @@ -677,7 +672,7 @@ def head_object( return json_utils.decode_simple_header(decoded) -@reporter.step_deco("Run neo-go dump-keys") +@reporter.step("Run neo-go dump-keys") def neo_go_dump_keys(shell: Shell, wallet: str) -> dict: """ Run neo-go dump keys command @@ -702,7 +697,7 @@ def neo_go_dump_keys(shell: Shell, wallet: str) -> dict: return {address_id: wallet_key} -@reporter.step_deco("Run neo-go query height") +@reporter.step("Run neo-go query height") def neo_go_query_height(shell: Shell, endpoint: str) -> dict: """ Run neo-go query height command @@ -734,7 +729,7 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict: } -@reporter.step_deco("Search object nodes") +@reporter.step("Search object nodes") def get_object_nodes( cluster: Cluster, wallet: str, diff --git a/src/frostfs_testlib/steps/complex_object_actions.py b/src/frostfs_testlib/steps/complex_object_actions.py index 54e5fc2..a67dd4c 100644 --- a/src/frostfs_testlib/steps/complex_object_actions.py +++ b/src/frostfs_testlib/steps/complex_object_actions.py @@ -12,7 +12,7 @@ import logging from typing import Optional, Tuple -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell @@ -20,7 +20,6 @@ from frostfs_testlib.steps.cli.object import head_object from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo -reporter = get_reporter() logger = logging.getLogger("NeoLogger") @@ -113,7 +112,7 @@ def get_complex_object_split_ranges( return ranges -@reporter.step_deco("Get Link Object") +@reporter.step("Get Link Object") def get_link_object( wallet: str, cid: str, @@ -166,7 +165,7 @@ def get_link_object( return None -@reporter.step_deco("Get Last Object") +@reporter.step("Get Last Object") def get_last_object( wallet: str, cid: str, diff --git a/src/frostfs_testlib/steps/epoch.py b/src/frostfs_testlib/steps/epoch.py index a589569..5a43ba3 100644 --- a/src/frostfs_testlib/steps/epoch.py +++ b/src/frostfs_testlib/steps/epoch.py @@ -2,8 +2,8 @@ import logging from time import sleep from typing import Optional +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import ( CLI_DEFAULT_TIMEOUT, FROSTFS_ADM_CONFIG_PATH, @@ -19,11 +19,10 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import InnerRing, Morp from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils import datetime_utils, wallet_utils -reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Get epochs from nodes") +@reporter.step("Get epochs from nodes") def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]: """ Get current epochs on each node. 
@@ -41,10 +40,8 @@ def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]: return epochs_by_node -@reporter.step_deco("Ensure fresh epoch") -def ensure_fresh_epoch( - shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None -) -> int: +@reporter.step("Ensure fresh epoch") +def ensure_fresh_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None) -> int: # ensure new fresh epoch to avoid epoch switch during test session alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] current_epoch = get_epoch(shell, cluster, alive_node) @@ -54,7 +51,7 @@ def ensure_fresh_epoch( return epoch -@reporter.step_deco("Wait up to {timeout} seconds for nodes on cluster to align epochs") +@reporter.step("Wait up to {timeout} seconds for nodes on cluster to align epochs") def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60): @wait_for_success(timeout, 5, None, True) def check_epochs(): @@ -64,7 +61,7 @@ def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60): check_epochs() -@reporter.step_deco("Get Epoch") +@reporter.step("Get Epoch") def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] endpoint = alive_node.get_rpc_endpoint() @@ -77,7 +74,7 @@ def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] return int(epoch.stdout) -@reporter.step_deco("Tick Epoch") +@reporter.step("Tick Epoch") def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): """ Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv) diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py index 2b70d6c..a8c9899 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -10,7 +10,7 @@ from urllib.parse import quote_plus import requests -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE from frostfs_testlib.s3.aws_cli_client import command_options from frostfs_testlib.shell import Shell @@ -21,15 +21,13 @@ from frostfs_testlib.storage.cluster import StorageNode from frostfs_testlib.testing.test_control import retry from frostfs_testlib.utils.file_utils import get_file_hash -reporter = get_reporter() - logger = logging.getLogger("NeoLogger") ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/") local_shell = LocalShell() -@reporter.step_deco("Get via HTTP Gate") +@reporter.step("Get via HTTP Gate") def get_via_http_gate( cid: str, oid: str, @@ -53,9 +51,7 @@ def get_via_http_gate( else: request = f"{endpoint}{request_path}" - resp = requests.get( - request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False - ) + resp = requests.get(request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False) if not resp.ok: raise Exception( @@ -75,10 +71,8 @@ def get_via_http_gate( return file_path -@reporter.step_deco("Get via Zip HTTP Gate") -def get_via_zip_http_gate( - cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300 -): +@reporter.step("Get via Zip HTTP Gate") +def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300): """ This function gets given object from HTTP gate cid: container id to get object from @@ -111,7 
+105,7 @@ def get_via_zip_http_gate(
     return os.path.join(os.getcwd(), ASSETS_DIR, prefix)


-@reporter.step_deco("Get via HTTP Gate by attribute")
+@reporter.step("Get via HTTP Gate by attribute")
 def get_via_http_gate_by_attribute(
     cid: str,
     attribute: dict,
@@ -136,9 +130,7 @@ def get_via_http_gate_by_attribute(
     else:
         request = f"{endpoint}{request_path}"

-    resp = requests.get(
-        request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname}
-    )
+    resp = requests.get(request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname})

     if not resp.ok:
         raise Exception(
@@ -159,7 +151,7 @@ def get_via_http_gate_by_attribute(


 # TODO: pass http_hostname as a header
-@reporter.step_deco("Upload via HTTP Gate")
+@reporter.step("Upload via HTTP Gate")
 def upload_via_http_gate(
     cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300
 ) -> str:
@@ -173,9 +165,7 @@ def upload_via_http_gate(
     request = f"{endpoint}/upload/{cid}"
     files = {"upload_file": open(path, "rb")}
     body = {"filename": path}
-    resp = requests.post(
-        request, files=files, data=body, headers=headers, timeout=timeout, verify=False
-    )
+    resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout, verify=False)

     if not resp.ok:
         raise Exception(
@@ -193,7 +183,7 @@ def upload_via_http_gate(
     return resp.json().get("object_id")


-@reporter.step_deco("Check is the passed object large")
+@reporter.step("Check if the passed object is large")
 def is_object_large(filepath: str) -> bool:
     """
     This function check passed file size and return True if file_size > SIMPLE_OBJECT_SIZE
@@ -208,7 +198,7 @@ def is_object_large(filepath: str) -> bool:


 # TODO: pass http_hostname as a header
-@reporter.step_deco("Upload via HTTP Gate using Curl")
+@reporter.step("Upload via HTTP Gate using Curl")
 def upload_via_http_gate_curl(
     cid: str,
     filepath: str,
@@ -256,7 +246,7 @@ def upload_via_http_gate_curl(


 @retry(max_attempts=3, sleep_interval=1)
-@reporter.step_deco("Get via HTTP Gate using Curl")
+@reporter.step("Get via HTTP Gate using Curl")
 def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> str:
     """
     This function gets given object from HTTP gate using curl utility.
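# Illustrative aside (not part of the patch): titles such as
# "Get via HTTP Gate" or "Get object from {endpoint}" are format-style
# templates. step_sketch() below is a hypothetical, simplified stand-in for
# reporter.step() showing how such a template could be filled from the
# wrapped function's bound arguments; the real testlib implementation may differ.

import inspect
from functools import wraps

def step_sketch(title: str):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            bound = inspect.signature(func).bind(*args, **kwargs)
            bound.apply_defaults()
            print(f"STEP: {title.format(**bound.arguments)}")  # render the templated title
            return func(*args, **kwargs)
        return wrapper
    return decorator

@step_sketch("Get object from {endpoint}")
def get_object_demo(endpoint: str) -> None:
    pass

get_object_demo("http://localhost:8080")  # prints: STEP: Get object from http://localhost:8080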
@@ -280,7 +270,7 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"):
     reporter.attach(command_attachment, f"{req_type} Request")


-@reporter.step_deco("Try to get object and expect error")
+@reporter.step("Try to get object and expect error")
 def try_to_get_object_and_expect_error(
     cid: str,
     oid: str,
@@ -296,7 +286,7 @@ def try_to_get_object_and_expect_error(
         assert match, f"Expected {err} to match {error_pattern}"


-@reporter.step_deco("Verify object can be get using HTTP header attribute")
+@reporter.step("Verify object can be retrieved using HTTP header attribute")
 def get_object_by_attr_and_verify_hashes(
     oid: str,
     file_name: str,
@@ -305,9 +295,7 @@ def get_object_by_attr_and_verify_hashes(
     endpoint: str,
     http_hostname: str,
 ) -> None:
-    got_file_path_http = get_via_http_gate(
-        cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname
-    )
+    got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname)
     got_file_path_http_attr = get_via_http_gate_by_attribute(
         cid=cid, attribute=attrs, endpoint=endpoint, http_hostname=http_hostname
     )
@@ -348,9 +336,7 @@ def verify_object_hash(
         shell=shell,
         endpoint=random_node.get_rpc_endpoint(),
     )
-    got_file_path_http = object_getter(
-        cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname
-    )
+    got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname)

     assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)

@@ -359,18 +345,14 @@ def assert_hashes_are_equal(orig_file_name: str, got_file_1: str, got_file_2: st
     msg = "Expected hashes are equal for files {f1} and {f2}"
     got_file_hash_http = get_file_hash(got_file_1)
     assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1)
-    assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(
-        f1=orig_file_name, f2=got_file_1
-    )
+    assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(f1=orig_file_name, f2=got_file_1)


 def attr_into_header(attrs: dict) -> dict:
     return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()}


-@reporter.step_deco(
-    "Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'"
-)
+@reporter.step("Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'")
 def attr_into_str_header_curl(attrs: dict) -> list:
     headers = []
     for k, v in attrs.items():
@@ -379,9 +361,7 @@ def attr_into_str_header_curl(attrs: dict) -> list:
     return headers


-@reporter.step_deco(
-    "Try to get object via http (pass http_request and optional attributes) and expect error"
-)
+@reporter.step("Try to get object via http (pass http_request and optional attributes) and expect error")
 def try_to_get_object_via_passed_request_and_expect_error(
     cid: str,
     oid: str,
diff --git a/src/frostfs_testlib/steps/network.py b/src/frostfs_testlib/steps/network.py
index a865461..64e235a 100644
--- a/src/frostfs_testlib/steps/network.py
+++ b/src/frostfs_testlib/steps/network.py
@@ -1,9 +1,7 @@
-from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib import reporter
 from frostfs_testlib.storage.cluster import ClusterNode
 from frostfs_testlib.testing.test_control import retry

-reporter = get_reporter()
-

 class IpTablesHelper:
     @staticmethod
@@ -21,11 +19,7 @@ class IpTablesHelper:
     @staticmethod
     def restore_input_traffic_to_port(node: ClusterNode) -> None:
         shell = node.host.get_shell()
-        ports = (
-            shell.exec("iptables -L --numeric | grep DROP | awk 
'{print $7}'") - .stdout.strip() - .split("\n") - ) + ports = shell.exec("iptables -L --numeric | grep DROP | awk '{print $7}'").stdout.strip().split("\n") if ports[0] == "": return for port in ports: @@ -34,11 +28,7 @@ class IpTablesHelper: @staticmethod def restore_input_traffic_to_node(node: ClusterNode) -> None: shell = node.host.get_shell() - unlock_ip = ( - shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'") - .stdout.strip() - .split("\n") - ) + unlock_ip = shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'").stdout.strip().split("\n") if unlock_ip[0] == "": return for ip in unlock_ip: @@ -47,17 +37,17 @@ class IpTablesHelper: # TODO Move class to HOST class IfUpDownHelper: - @reporter.step_deco("Down {interface} to {node}") + @reporter.step("Down {interface} to {node}") def down_interface(self, node: ClusterNode, interface: str) -> None: shell = node.host.get_shell() shell.exec(f"ifdown {interface}") - @reporter.step_deco("Up {interface} to {node}") + @reporter.step("Up {interface} to {node}") def up_interface(self, node: ClusterNode, interface: str) -> None: shell = node.host.get_shell() shell.exec(f"ifup {interface}") - @reporter.step_deco("Up all interface to {node}") + @reporter.step("Up all interface to {node}") def up_all_interface(self, node: ClusterNode) -> None: shell = node.host.get_shell() interfaces = list(node.host.config.interfaces.keys()) @@ -65,7 +55,7 @@ class IfUpDownHelper: for name_interface in interfaces: self.check_state_up(node, name_interface) - @reporter.step_deco("Down all interface to {node}") + @reporter.step("Down all interface to {node}") def down_all_interface(self, node: ClusterNode) -> None: shell = node.host.get_shell() interfaces = list(node.host.config.interfaces.keys()) @@ -73,12 +63,10 @@ class IfUpDownHelper: for name_interface in interfaces: self.check_state_down(node, name_interface) - @reporter.step_deco("Check {node} to {interface}") + @reporter.step("Check {node} to {interface}") def check_state(self, node: ClusterNode, interface: str) -> str: shell = node.host.get_shell() - return shell.exec( - f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'" - ).stdout.strip() + return shell.exec(f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'").stdout.strip() @retry(max_attempts=5, sleep_interval=5, expected_result="UP") def check_state_up(self, node: ClusterNode, interface: str) -> str: diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py index d91721c..28e3820 100644 --- a/src/frostfs_testlib/steps/node_management.py +++ b/src/frostfs_testlib/steps/node_management.py @@ -6,13 +6,9 @@ from dataclasses import dataclass from time import sleep from typing import Optional +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli -from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.resources.cli import ( - FROSTFS_ADM_CONFIG_PATH, - FROSTFS_ADM_EXEC, - FROSTFS_CLI_EXEC, -) +from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align @@ -20,7 +16,6 @@ from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate from frostfs_testlib.utils import datetime_utils -reporter = get_reporter() logger = 
logging.getLogger("NeoLogger") @@ -40,7 +35,7 @@ class HealthStatus: return HealthStatus(network, health) -@reporter.step_deco("Get Locode from random storage node") +@reporter.step("Get Locode from random storage node") def get_locode_from_random_node(cluster: Cluster) -> str: node = random.choice(cluster.services(StorageNode)) locode = node.get_un_locode() @@ -48,7 +43,7 @@ def get_locode_from_random_node(cluster: Cluster) -> str: return locode -@reporter.step_deco("Healthcheck for storage node {node}") +@reporter.step("Healthcheck for storage node {node}") def storage_node_healthcheck(node: StorageNode) -> HealthStatus: """ The function returns storage node's health status. @@ -62,7 +57,7 @@ def storage_node_healthcheck(node: StorageNode) -> HealthStatus: return HealthStatus.from_stdout(output) -@reporter.step_deco("Set status for {node}") +@reporter.step("Set status for {node}") def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None: """ The function sets particular status for given node. @@ -75,7 +70,7 @@ def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> _run_control_command_with_retries(node, command, retries) -@reporter.step_deco("Get netmap snapshot") +@reporter.step("Get netmap snapshot") def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: """ The function returns string representation of netmap snapshot. @@ -95,7 +90,7 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: ).stdout -@reporter.step_deco("Get shard list for {node}") +@reporter.step("Get shard list for {node}") def node_shard_list(node: StorageNode) -> list[str]: """ The function returns list of shards for specified storage node. @@ -109,7 +104,7 @@ def node_shard_list(node: StorageNode) -> list[str]: return re.findall(r"Shard (.*):", output) -@reporter.step_deco("Shard set for {node}") +@reporter.step("Shard set for {node}") def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str: """ The function sets mode for specified shard. @@ -120,7 +115,7 @@ def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str: return _run_control_command_with_retries(node, command) -@reporter.step_deco("Drop object from {node}") +@reporter.step("Drop object from {node}") def drop_object(node: StorageNode, cid: str, oid: str) -> str: """ The function drops object from specified node. 
@@ -131,14 +126,14 @@ def drop_object(node: StorageNode, cid: str, oid: str) -> str:
     return _run_control_command_with_retries(node, command)


-@reporter.step_deco("Delete data from host for node {node}")
+@reporter.step("Delete data from host for node {node}")
 def delete_node_data(node: StorageNode) -> None:
     node.stop_service()
     node.host.delete_storage_node_data(node.name)
     time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))


-@reporter.step_deco("Exclude node {node_to_exclude} from network map")
+@reporter.step("Exclude node {node_to_exclude} from network map")
 def exclude_node_from_network_map(
     node_to_exclude: StorageNode,
     alive_node: StorageNode,
@@ -154,12 +149,10 @@ def exclude_node_from_network_map(
     wait_for_epochs_align(shell, cluster)

     snapshot = get_netmap_snapshot(node=alive_node, shell=shell)
-    assert (
-        node_netmap_key not in snapshot
-    ), f"Expected node with key {node_netmap_key} to be absent in network map"
+    assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be absent in network map"


-@reporter.step_deco("Include node {node_to_include} into network map")
+@reporter.step("Include node {node_to_include} into network map")
 def include_node_to_network_map(
     node_to_include: StorageNode,
     alive_node: StorageNode,
@@ -178,37 +171,29 @@ def include_node_to_network_map(
     check_node_in_map(node_to_include, shell, alive_node)


-@reporter.step_deco("Check node {node} in network map")
-def check_node_in_map(
-    node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None
-) -> None:
+@reporter.step("Check node {node} in network map")
+def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
     alive_node = alive_node or node
     node_netmap_key = node.get_wallet_public_key()
     logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}")

     snapshot = get_netmap_snapshot(alive_node, shell)
-    assert (
-        node_netmap_key in snapshot
-    ), f"Expected node with key {node_netmap_key} to be in network map"
+    assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map"


-@reporter.step_deco("Check node {node} NOT in network map")
-def check_node_not_in_map(
-    node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None
-) -> None:
+@reporter.step("Check node {node} NOT in network map")
+def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
     alive_node = alive_node or node
     node_netmap_key = node.get_wallet_public_key()
     logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}")

     snapshot = get_netmap_snapshot(alive_node, shell)
-    assert (
-        node_netmap_key not in snapshot
-    ), f"Expected node with key {node_netmap_key} to be NOT in network map"
+    assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be NOT in network map"


-@reporter.step_deco("Wait for node {node} is ready")
+@reporter.step("Wait for node {node} to be ready")
 def wait_for_node_to_be_ready(node: StorageNode) -> None:
     timeout, attempts = 30, 6
     for _ in range(attempts):
@@ -219,12 +204,10 @@ def wait_for_node_to_be_ready(node: StorageNode) -> None:
         except Exception as err:
             logger.warning(f"Node {node} is not ready:\n{err}")
         sleep(timeout)
-    raise AssertionError(
-        f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds"
-    )
+    raise AssertionError(f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds")


-@reporter.step_deco("Remove nodes from network map trough 
cli-adm morph command") +@reporter.step("Remove nodes from network map trough cli-adm morph command") def remove_nodes_from_map_morph( shell: Shell, cluster: Cluster, diff --git a/src/frostfs_testlib/steps/payment_neogo.py b/src/frostfs_testlib/steps/payment_neogo.py index 7fe0b4d..8e78cca 100644 --- a/src/frostfs_testlib/steps/payment_neogo.py +++ b/src/frostfs_testlib/steps/payment_neogo.py @@ -8,21 +8,21 @@ from typing import Optional from neo3.wallet import utils as neo3_utils from neo3.wallet import wallet as neo3_wallet +from frostfs_testlib import reporter from frostfs_testlib.cli import NeoGo -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils -reporter = get_reporter() logger = logging.getLogger("NeoLogger") EMPTY_PASSWORD = "" TX_PERSIST_TIMEOUT = 15 # seconds ASSET_POWER_SIDECHAIN = 10**12 + def get_nns_contract_hash(morph_chain: MorphChain) -> str: return morph_chain.rpc_client.get_contract_state(1)["hash"] @@ -39,6 +39,7 @@ def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell) stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"] return bytes.decode(base64.b64decode(stack_data[0]["value"])) + def transaction_accepted(morph_chain: MorphChain, tx_id: str): """ This function returns True in case of accepted TX. @@ -62,7 +63,7 @@ def transaction_accepted(morph_chain: MorphChain, tx_id: str): return False -@reporter.step_deco("Get FrostFS Balance") +@reporter.step("Get FrostFS Balance") def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""): """ This function returns FrostFS balance for given wallet. 
@@ -82,7 +83,8 @@ def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_ logger.error(f"failed to get wallet balance: {out}") raise out -@reporter.step_deco("Transfer Gas") + +@reporter.step("Transfer Gas") def transfer_gas( shell: Shell, amount: int, @@ -111,16 +113,10 @@ def transfer_gas( """ wallet_from_path = wallet_from_path or morph_chain.get_wallet_path() wallet_from_password = ( - wallet_from_password - if wallet_from_password is not None - else morph_chain.get_wallet_password() - ) - address_from = address_from or wallet_utils.get_last_address_from_wallet( - wallet_from_path, wallet_from_password - ) - address_to = address_to or wallet_utils.get_last_address_from_wallet( - wallet_to_path, wallet_to_password + wallet_from_password if wallet_from_password is not None else morph_chain.get_wallet_password() ) + address_from = address_from or wallet_utils.get_last_address_from_wallet(wallet_from_path, wallet_from_password) + address_to = address_to or wallet_utils.get_last_address_from_wallet(wallet_to_path, wallet_to_password) neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) out = neogo.nep17.transfer( @@ -141,7 +137,7 @@ def transfer_gas( time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) -@reporter.step_deco("Get Sidechain Balance") +@reporter.step("Get Sidechain Balance") def get_sidechain_balance(morph_chain: MorphChain, address: str): resp = morph_chain.rpc_client.get_nep17_balances(address=address) logger.info(f"Got getnep17balances response: {resp}") diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index d746337..1d7adfa 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -8,27 +8,23 @@ from typing import Optional from dateutil.parser import parse +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAuthmate -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus from frostfs_testlib.shell import CommandOptions, InteractiveInput, Shell from frostfs_testlib.shell.interfaces import SshCredentials -from frostfs_testlib.steps.cli.container import ( - search_container_by_name, - search_nodes_with_container, -) +from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.utils.cli_utils import _run_with_passwd -reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Expected all objects are presented in the bucket") +@reporter.step("Expected all objects are presented in the bucket") def check_objects_in_bucket( s3_client: S3ClientWrapper, bucket: str, @@ -37,13 +33,9 @@ def check_objects_in_bucket( ) -> None: unexpected_objects = unexpected_objects or [] bucket_objects = s3_client.list_objects(bucket) - assert len(bucket_objects) == len( - expected_objects - ), f"Expected {len(expected_objects)} objects in the bucket" + assert len(bucket_objects) == len(expected_objects), f"Expected {len(expected_objects)} objects in the bucket" for bucket_object in expected_objects: - assert ( - bucket_object in bucket_objects - ), f"Expected object 
{bucket_object} in objects list {bucket_objects}"
+        assert bucket_object in bucket_objects, f"Expected object {bucket_object} in objects list {bucket_objects}"

     for bucket_object in unexpected_objects:
         assert (
@@ -51,21 +43,17 @@ def check_objects_in_bucket(
         ), f"Expected object {bucket_object} not in objects list {bucket_objects}"


-@reporter.step_deco("Try to get object and got error")
-def try_to_get_objects_and_expect_error(
-    s3_client: S3ClientWrapper, bucket: str, object_keys: list
-) -> None:
+@reporter.step("Try to get object and expect error")
+def try_to_get_objects_and_expect_error(s3_client: S3ClientWrapper, bucket: str, object_keys: list) -> None:
     for obj in object_keys:
         try:
             s3_client.get_object(bucket, obj)
             raise AssertionError(f"Object {obj} found in bucket {bucket}")
         except Exception as err:
-            assert "The specified key does not exist" in str(
-                err
-            ), f"Expected error in exception {err}"
+            assert "The specified key does not exist" in str(err), f"Expected error in exception {err}"


-@reporter.step_deco("Set versioning status to '{status}' for bucket '{bucket}'")
+@reporter.step("Set versioning status to '{status}' for bucket '{bucket}'")
 def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus):
     if status == VersioningStatus.UNDEFINED:
         return
@@ -83,12 +71,8 @@ def object_key_from_file_path(full_path: str) -> str:
 def assert_tags(
     actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None
 ) -> None:
-    expected_tags = (
-        [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else []
-    )
-    unexpected_tags = (
-        [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else []
-    )
+    expected_tags = [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else []
+    unexpected_tags = [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else []
     if expected_tags == []:
         assert not actual_tags, f"Expected there is no tags, got {actual_tags}"
     assert len(expected_tags) == len(actual_tags)
@@ -98,7 +82,7 @@ def assert_tags(
         assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}"


-@reporter.step_deco("Expected all tags are presented in object")
+@reporter.step("Expect all tags to be present on the object")
 def check_tags_by_object(
     s3_client: S3ClientWrapper,
     bucket: str,
@@ -107,12 +91,10 @@ def check_tags_by_object(
     unexpected_tags: Optional[list] = None,
 ) -> None:
     actual_tags = s3_client.get_object_tagging(bucket, key)
-    assert_tags(
-        expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
-    )
+    assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags)


-@reporter.step_deco("Expected all tags are presented in bucket")
+@reporter.step("Expect all tags to be present on the bucket")
 def check_tags_by_bucket(
     s3_client: S3ClientWrapper,
     bucket: str,
@@ -120,9 +102,7 @@ def check_tags_by_bucket(
     unexpected_tags: Optional[list] = None,
 ) -> None:
     actual_tags = s3_client.get_bucket_tagging(bucket)
-    assert_tags(
-        expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
-    )
+    assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags)


 def assert_object_lock_mode(
@@ -135,25 +115,19 @@ def assert_object_lock_mode(
     retain_period: Optional[int] = None,
 ):
     object_dict = s3_client.get_object(bucket, file_name, full_output=True)
-    assert (
-        
object_dict.get("ObjectLockMode") == object_lock_mode - ), f"Expected Object Lock Mode is {object_lock_mode}" + assert object_dict.get("ObjectLockMode") == object_lock_mode, f"Expected Object Lock Mode is {object_lock_mode}" assert ( object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status ), f"Expected Object Lock Legal Hold Status is {legal_hold_status}" object_retain_date = object_dict.get("ObjectLockRetainUntilDate") - retain_date = ( - parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date - ) + retain_date = parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date if retain_until_date: assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_until_date.strftime( "%Y-%m-%dT%H:%M:%S" ), f'Expected Object Lock Retain Until Date is {str(retain_until_date.strftime("%Y-%m-%dT%H:%M:%S"))}' elif retain_period: last_modify_date = object_dict.get("LastModified") - last_modify = ( - parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date - ) + last_modify = parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date assert ( retain_date - last_modify + timedelta(seconds=1) ).days == retain_period, f"Expected retention period is {retain_period} days" @@ -187,7 +161,7 @@ def assert_s3_acl(acl_grants: list, permitted_users: str): logger.error("FULL_CONTROL is given to All Users") -@reporter.step_deco("Init S3 Credentials") +@reporter.step("Init S3 Credentials") def init_s3_credentials( wallet: WalletInfo, shell: Shell, @@ -213,24 +187,18 @@ def init_s3_credentials( container_placement_policy=container_placement_policy, ).stdout aws_access_key_id = str( - re.search(r"access_key_id.*:\s.(?P\w*)", issue_secret_output).group( - "aws_access_key_id" - ) + re.search(r"access_key_id.*:\s.(?P\w*)", issue_secret_output).group("aws_access_key_id") ) aws_secret_access_key = str( - re.search( - r"secret_access_key.*:\s.(?P\w*)", issue_secret_output - ).group("aws_secret_access_key") - ) - cid = str( - re.search(r"container_id.*:\s.(?P\w*)", issue_secret_output).group( - "container_id" + re.search(r"secret_access_key.*:\s.(?P\w*)", issue_secret_output).group( + "aws_secret_access_key" ) ) + cid = str(re.search(r"container_id.*:\s.(?P\w*)", issue_secret_output).group("container_id")) return cid, aws_access_key_id, aws_secret_access_key -@reporter.step_deco("Delete bucket with all objects") +@reporter.step("Delete bucket with all objects") def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): versioning_status = s3_client.get_bucket_versioning_status(bucket) if versioning_status == VersioningStatus.ENABLED.value: @@ -255,7 +223,7 @@ def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): s3_client.delete_bucket(bucket) -@reporter.step_deco("Search nodes bucket") +@reporter.step("Search nodes bucket") def search_nodes_with_bucket( cluster: Cluster, bucket_name: str, @@ -264,7 +232,5 @@ def search_nodes_with_bucket( endpoint: str, ) -> list[ClusterNode]: cid = search_container_by_name(wallet=wallet, name=bucket_name, shell=shell, endpoint=endpoint) - nodes_list = search_nodes_with_container( - wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster - ) + nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) return nodes_list diff --git a/src/frostfs_testlib/steps/session_token.py b/src/frostfs_testlib/steps/session_token.py index b82d0e2..6c87cac 100644 --- 
a/src/frostfs_testlib/steps/session_token.py +++ b/src/frostfs_testlib/steps/session_token.py @@ -7,8 +7,8 @@ from dataclasses import dataclass from enum import Enum from typing import Any, Optional +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell @@ -17,7 +17,6 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.readable import HumanReadableEnum from frostfs_testlib.utils import json_utils, wallet_utils -reporter = get_reporter() logger = logging.getLogger("NeoLogger") UNRELATED_KEY = "unrelated key in the session" @@ -50,7 +49,7 @@ class Lifetime: iat: int = 0 -@reporter.step_deco("Generate Session Token") +@reporter.step("Generate Session Token") def generate_session_token( owner_wallet: WalletInfo, session_wallet: WalletInfo, @@ -72,9 +71,7 @@ def generate_session_token( file_path = os.path.join(tokens_dir, str(uuid.uuid4())) - pub_key_64 = wallet_utils.get_wallet_public_key( - session_wallet.path, session_wallet.password, "base64" - ) + pub_key_64 = wallet_utils.get_wallet_public_key(session_wallet.path, session_wallet.password, "base64") lifetime = lifetime or Lifetime() @@ -99,7 +96,7 @@ def generate_session_token( return file_path -@reporter.step_deco("Generate Session Token For Container") +@reporter.step("Generate Session Token For Container") def generate_container_session_token( owner_wallet: WalletInfo, session_wallet: WalletInfo, @@ -126,11 +123,7 @@ def generate_container_session_token( "container": { "verb": verb.value, "wildcard": cid is None, - **( - {"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}} - if cid is not None - else {} - ), + **({"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}} if cid is not None else {}), }, } @@ -143,7 +136,7 @@ def generate_container_session_token( ) -@reporter.step_deco("Generate Session Token For Object") +@reporter.step("Generate Session Token For Object") def generate_object_session_token( owner_wallet: WalletInfo, session_wallet: WalletInfo, @@ -185,7 +178,7 @@ def generate_object_session_token( ) -@reporter.step_deco("Get signed token for container session") +@reporter.step("Get signed token for container session") def get_container_signed_token( owner_wallet: WalletInfo, user_wallet: WalletInfo, @@ -207,7 +200,7 @@ def get_container_signed_token( return sign_session_token(shell, session_token_file, owner_wallet) -@reporter.step_deco("Get signed token for object session") +@reporter.step("Get signed token for object session") def get_object_signed_token( owner_wallet: WalletInfo, user_wallet: WalletInfo, @@ -234,7 +227,7 @@ def get_object_signed_token( return sign_session_token(shell, session_token_file, owner_wallet) -@reporter.step_deco("Create Session Token") +@reporter.step("Create Session Token") def create_session_token( shell: Shell, owner: str, @@ -265,7 +258,7 @@ def create_session_token( return session_token -@reporter.step_deco("Sign Session Token") +@reporter.step("Sign Session Token") def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) -> str: """ This function signs the session token by the given wallet. @@ -279,10 +272,6 @@ def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) - The path to the signed token. 
""" signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - frostfscli = FrostfsCli( - shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG - ) - frostfscli.util.sign_session_token( - wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file - ) + frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG) + frostfscli.util.sign_session_token(wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file) return signed_token_file diff --git a/src/frostfs_testlib/steps/storage_object.py b/src/frostfs_testlib/steps/storage_object.py index 7776754..ce1bb94 100644 --- a/src/frostfs_testlib/steps/storage_object.py +++ b/src/frostfs_testlib/steps/storage_object.py @@ -3,7 +3,7 @@ from time import sleep import pytest -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import delete_object, get_object @@ -12,16 +12,13 @@ from frostfs_testlib.steps.tombstone import verify_head_tombstone from frostfs_testlib.storage.cluster import Cluster from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo -reporter = get_reporter() logger = logging.getLogger("NeoLogger") CLEANUP_TIMEOUT = 10 -@reporter.step_deco("Delete Objects") -def delete_objects( - storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster -) -> None: +@reporter.step("Delete Objects") +def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster) -> None: """ Deletes given storage objects. diff --git a/src/frostfs_testlib/steps/storage_policy.py b/src/frostfs_testlib/steps/storage_policy.py index eca25d2..d2202a4 100644 --- a/src/frostfs_testlib/steps/storage_policy.py +++ b/src/frostfs_testlib/steps/storage_policy.py @@ -6,7 +6,7 @@ """ import logging -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import head_object @@ -14,14 +14,11 @@ from frostfs_testlib.steps.complex_object_actions import get_last_object from frostfs_testlib.storage.cluster import StorageNode from frostfs_testlib.utils import string_utils -reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Get Object Copies") -def get_object_copies( - complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] -) -> int: +@reporter.step("Get Object Copies") +def get_object_copies(complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: """ The function performs requests to all nodes of the container and finds out if they store a copy of the object. The procedure is @@ -45,10 +42,8 @@ def get_object_copies( ) -@reporter.step_deco("Get Simple Object Copies") -def get_simple_object_copies( - wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] -) -> int: +@reporter.step("Get Simple Object Copies") +def get_simple_object_copies(wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: """ To figure out the number of a simple object copies, only direct HEAD requests should be made to the every node of the container. 
@@ -66,9 +61,7 @@ def get_simple_object_copies( copies = 0 for node in nodes: try: - response = head_object( - wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True - ) + response = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True) if response: logger.info(f"Found object {oid} on node {node}") copies += 1 @@ -78,10 +71,8 @@ def get_simple_object_copies( return copies -@reporter.step_deco("Get Complex Object Copies") -def get_complex_object_copies( - wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] -) -> int: +@reporter.step("Get Complex Object Copies") +def get_complex_object_copies(wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: """ To figure out the number of a complex object copies, we firstly need to retrieve its Last object. We consider that the number of @@ -102,10 +93,8 @@ def get_complex_object_copies( return get_simple_object_copies(wallet, cid, last_oid, shell, nodes) -@reporter.step_deco("Get Nodes With Object") -def get_nodes_with_object( - cid: str, oid: str, shell: Shell, nodes: list[StorageNode] -) -> list[StorageNode]: +@reporter.step("Get Nodes With Object") +def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]: """ The function returns list of nodes which store the given object. @@ -141,7 +130,7 @@ def get_nodes_with_object( return nodes_list -@reporter.step_deco("Get Nodes Without Object") +@reporter.step("Get Nodes Without Object") def get_nodes_without_object( wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] ) -> list[StorageNode]: @@ -160,9 +149,7 @@ def get_nodes_without_object( nodes_list = [] for node in nodes: try: - res = head_object( - wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True - ) + res = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True) if res is None: nodes_list.append(node) except Exception as err: diff --git a/src/frostfs_testlib/steps/tombstone.py b/src/frostfs_testlib/steps/tombstone.py index a46cf77..b468c93 100644 --- a/src/frostfs_testlib/steps/tombstone.py +++ b/src/frostfs_testlib/steps/tombstone.py @@ -3,18 +3,15 @@ import logging from neo3.wallet import wallet -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import head_object -reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Verify Head Tombstone") -def verify_head_tombstone( - wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str -): +@reporter.step("Verify Head Tombstone") +def verify_head_tombstone(wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str): header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"] s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"] @@ -30,12 +27,6 @@ def verify_head_tombstone( assert header["ownerID"] == addr, "Tombstone Owner ID is wrong" assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone" - assert ( - header["sessionToken"]["body"]["object"]["verb"] == "DELETE" - ), "Header Session Type isn't DELETE" - assert ( - header["sessionToken"]["body"]["object"]["target"]["container"] == cid - ), "Header Session ID is wrong" - assert ( - oid in header["sessionToken"]["body"]["object"]["target"]["objects"] - ), "Header 
Session OID is wrong" + assert header["sessionToken"]["body"]["object"]["verb"] == "DELETE", "Header Session Type isn't DELETE" + assert header["sessionToken"]["body"]["object"]["target"]["container"] == cid, "Header Session ID is wrong" + assert oid in header["sessionToken"]["body"]["object"]["target"]["objects"], "Header Session OID is wrong" diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 02601ac..313215a 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -4,9 +4,9 @@ import re import yaml from yarl import URL +from frostfs_testlib import reporter from frostfs_testlib.hosting import Host, Hosting from frostfs_testlib.hosting.config import ServiceConfig -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.storage import get_service_registry from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration @@ -16,8 +16,6 @@ from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.service_registry import ServiceRegistry -reporter = get_reporter() - class ClusterNode: """ diff --git a/src/frostfs_testlib/storage/configuration/service_configuration.py b/src/frostfs_testlib/storage/configuration/service_configuration.py index 1aa7846..f7b3be7 100644 --- a/src/frostfs_testlib/storage/configuration/service_configuration.py +++ b/src/frostfs_testlib/storage/configuration/service_configuration.py @@ -4,13 +4,11 @@ from typing import Any import yaml -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.shell.interfaces import CommandOptions from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml from frostfs_testlib.storage.dataclasses.node_base import ServiceClass -reporter = get_reporter() - class ServiceConfiguration(ServiceConfigurationYml): def __init__(self, service: "ServiceClass") -> None: diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index 8ecada8..003bb6b 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -2,18 +2,16 @@ import copy from typing import Optional import frostfs_testlib.resources.optionals as optionals +from frostfs_testlib import reporter from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType from frostfs_testlib.load.load_report import LoadReport from frostfs_testlib.load.load_verifiers import LoadVerifier -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.test_control import run_optionally -reporter = get_reporter() - class BackgroundLoadController: k6_dir: str @@ -86,7 +84,7 @@ class BackgroundLoadController: return all_endpoints[load_type][endpoint_selection_strategy] @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Prepare 
load instances") + @reporter.step("Prepare load instances") def prepare(self): self.endpoints = self._get_endpoints(self.load_params.load_type, self.load_params.endpoint_selection_strategy) self.runner.prepare(self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir) @@ -99,7 +97,7 @@ class BackgroundLoadController: self.started = True @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Stop load") + @reporter.step("Stop load") def stop(self): self.runner.stop() @@ -108,7 +106,7 @@ class BackgroundLoadController: return self.runner.is_running @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Reset load") + @reporter.step("Reset load") def _reset_for_consequent_load(self): """This method is required if we want to run multiple loads during test run. Raise load counter by 1 and append it to load_id @@ -118,7 +116,7 @@ class BackgroundLoadController: self.load_params.set_id(f"{self.load_params.load_id}_{self.load_counter}") @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Startup load") + @reporter.step("Startup load") def startup(self): self.prepare() self.preset() @@ -129,7 +127,7 @@ class BackgroundLoadController: self.runner.preset() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Stop and get results of load") + @reporter.step("Stop and get results of load") def teardown(self, load_report: Optional[LoadReport] = None): if not self.started: return @@ -141,7 +139,7 @@ class BackgroundLoadController: load_report.add_summaries(self.load_summaries) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Run post-load verification") + @reporter.step("Run post-load verification") def verify(self): try: load_issues = self._collect_load_issues() @@ -153,7 +151,7 @@ class BackgroundLoadController: self._reset_for_consequent_load() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Collect load issues") + @reporter.step("Collect load issues") def _collect_load_issues(self): verifier = LoadVerifier(self.load_params) return verifier.collect_load_issues(self.load_summaries) @@ -163,7 +161,7 @@ class BackgroundLoadController: self.runner.wait_until_finish(soft_timeout) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Verify loaded objects") + @reporter.step("Verify loaded objects") def _run_verify_scenario(self) -> list[str]: self.verification_params = LoadParams( verify_clients=self.load_params.verify_clients, diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 7020671..35ab6c1 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -4,12 +4,12 @@ import time from typing import TypeVar import frostfs_testlib.resources.optionals as optionals +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli from frostfs_testlib.cli.netmap_parser import NetmapParser from frostfs_testlib.healthcheck.interfaces import Healthcheck from frostfs_testlib.hosting.interfaces import HostStatus from frostfs_testlib.plugins import load_all -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC from frostfs_testlib.resources.common import 
DEFAULT_WALLET_CONFIG, MORPH_BLOCK_TIME from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider @@ -21,7 +21,6 @@ from frostfs_testlib.testing import parallel from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success from frostfs_testlib.utils.datetime_utils import parse_time -reporter = get_reporter() logger = logging.getLogger("NeoLogger") if_up_down_helper = IfUpDownHelper() @@ -76,7 +75,7 @@ class ClusterStateController: return online_svc @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop host of node {node}") + @reporter.step("Stop host of node {node}") def stop_node_host(self, node: ClusterNode, mode: str): # Drop ssh connection for this node before shutdown provider = SshConnectionProvider() @@ -88,7 +87,7 @@ class ClusterStateController: self._wait_for_host_offline(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Shutdown whole cluster") + @reporter.step("Shutdown whole cluster") def shutdown_cluster(self, mode: str, reversed_order: bool = False): nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes @@ -105,7 +104,7 @@ class ClusterStateController: self._wait_for_host_offline(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start host of node {node}") + @reporter.step("Start host of node {node}") def start_node_host(self, node: ClusterNode, startup_healthcheck: bool = True): with reporter.step(f"Start host {node.host.config.address}"): node.host.start_host() @@ -115,7 +114,7 @@ class ClusterStateController: self.wait_startup_healthcheck() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start stopped hosts") + @reporter.step("Start stopped hosts") def start_stopped_hosts(self, reversed_order: bool = False): if not self.stopped_nodes: return @@ -133,35 +132,35 @@ class ClusterStateController: self.wait_after_storage_startup() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Detach disk {device} at {mountpoint} on node {node}") + @reporter.step("Detach disk {device} at {mountpoint} on node {node}") def detach_disk(self, node: StorageNode, device: str, mountpoint: str): disk_controller = self._get_disk_controller(node, device, mountpoint) self.detached_disks[disk_controller.id] = disk_controller disk_controller.detach() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Attach disk {device} at {mountpoint} on node {node}") + @reporter.step("Attach disk {device} at {mountpoint} on node {node}") def attach_disk(self, node: StorageNode, device: str, mountpoint: str): disk_controller = self._get_disk_controller(node, device, mountpoint) disk_controller.attach() self.detached_disks.pop(disk_controller.id, None) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Restore detached disks") + @reporter.step("Restore detached disks") def restore_disks(self): for disk_controller in self.detached_disks.values(): disk_controller.attach() self.detached_disks = {} @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop all {service_type} services") + @reporter.step("Stop all {service_type} services") def stop_services_of_type(self, service_type: type[ServiceClass], mask: bool = True): services = self.cluster.services(service_type) self.stopped_services.update(services) parallel([service.stop_service for service in services], mask=mask) 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Start all {service_type} services")
+    @reporter.step("Start all {service_type} services")
     def start_services_of_type(self, service_type: type[ServiceClass]):
         services = self.cluster.services(service_type)
         parallel([service.start_service for service in services])
@@ -176,24 +175,24 @@ class ClusterStateController:
         result = s3gate.get_metric("frostfs_s3_gw_pool_current_nodes")
         assert 'address="127.0.0.1' in result.stdout, "S3Gate should connect to local storage node"
 
-    @reporter.step_deco("Wait for S3Gates reconnection to local storage")
+    @reporter.step("Wait for S3Gates reconnection to local storage")
     def wait_s3gates(self):
         online_s3gates = self._get_online(S3Gate)
         if online_s3gates:
             parallel(self.wait_s3gate, online_s3gates)
 
-    @reporter.step_deco("Wait for cluster startup healtcheck")
+    @reporter.step("Wait for cluster startup healthcheck")
     def wait_startup_healthcheck(self):
         nodes = self.cluster.nodes(self._get_online(StorageNode))
         parallel(self.healthcheck.startup_healthcheck, nodes)
 
-    @reporter.step_deco("Wait for storage reconnection to the system")
+    @reporter.step("Wait for storage reconnection to the system")
     def wait_after_storage_startup(self):
         self.wait_startup_healthcheck()
         self.wait_s3gates()
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Start all stopped services")
+    @reporter.step("Start all stopped services")
     def start_all_stopped_services(self):
         stopped_storages = self._get_stopped_by_type(StorageNode)
         parallel([service.start_service for service in self.stopped_services])
@@ -203,21 +202,21 @@ class ClusterStateController:
             self.wait_after_storage_startup()
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Stop {service_type} service on {node}")
+    @reporter.step("Stop {service_type} service on {node}")
     def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True):
         service = node.service(service_type)
         service.stop_service(mask)
         self.stopped_services.add(service)
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Start {service_type} service on {node}")
+    @reporter.step("Start {service_type} service on {node}")
     def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]):
         service = node.service(service_type)
         service.start_service()
         self.stopped_services.discard(service)
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Start all stopped {service_type} services")
+    @reporter.step("Start all stopped {service_type} services")
     def start_stopped_services_of_type(self, service_type: type[ServiceClass]):
         stopped_svc = self._get_stopped_by_type(service_type)
         if not stopped_svc:
@@ -231,7 +230,7 @@ class ClusterStateController:
 
     # TODO: Deprecated
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Stop all storage services on cluster")
+    @reporter.step("Stop all storage services on cluster")
     def stop_all_storage_services(self, reversed_order: bool = False):
         nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes
 
@@ -240,7 +239,7 @@ class ClusterStateController:
 
     # TODO: Deprecated
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Stop all S3 gates on cluster")
+    @reporter.step("Stop all S3 gates on cluster")
     def stop_all_s3_gates(self, reversed_order: bool = False):
         nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes
 
@@ -249,42 +248,42 @@ class ClusterStateController:
 
     # TODO: Deprecated
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Stop storage service on {node}")
+    @reporter.step("Stop storage service on {node}")
     def stop_storage_service(self, node: ClusterNode, mask: bool = True):
         self.stop_service_of_type(node, StorageNode, mask)
 
     # TODO: Deprecated
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Start storage service on {node}")
+    @reporter.step("Start storage service on {node}")
     def start_storage_service(self, node: ClusterNode):
         self.start_service_of_type(node, StorageNode)
 
     # TODO: Deprecated
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Start stopped storage services")
+    @reporter.step("Start stopped storage services")
     def start_stopped_storage_services(self):
         self.start_stopped_services_of_type(StorageNode)
 
     # TODO: Deprecated
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Stop s3 gate on {node}")
+    @reporter.step("Stop s3 gate on {node}")
     def stop_s3_gate(self, node: ClusterNode, mask: bool = True):
         self.stop_service_of_type(node, S3Gate, mask)
 
     # TODO: Deprecated
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Start s3 gate on {node}")
+    @reporter.step("Start s3 gate on {node}")
     def start_s3_gate(self, node: ClusterNode):
         self.start_service_of_type(node, S3Gate)
 
     # TODO: Deprecated
    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Start stopped S3 gates")
+    @reporter.step("Start stopped S3 gates")
     def start_stopped_s3_gates(self):
         self.start_stopped_services_of_type(S3Gate)
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Suspend {process_name} service in {node}")
+    @reporter.step("Suspend {process_name} service on {node}")
     def suspend_service(self, process_name: str, node: ClusterNode):
         node.host.wait_success_suspend_process(process_name)
         if self.suspended_services.get(process_name):
@@ -293,20 +292,20 @@ class ClusterStateController:
             self.suspended_services[process_name] = [node]
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Resume {process_name} service in {node}")
+    @reporter.step("Resume {process_name} service on {node}")
     def resume_service(self, process_name: str, node: ClusterNode):
         node.host.wait_success_resume_process(process_name)
         if self.suspended_services.get(process_name) and node in self.suspended_services[process_name]:
             self.suspended_services[process_name].remove(node)
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Start suspend processes services")
+    @reporter.step("Resume suspended services")
     def resume_suspended_services(self):
         for process_name, list_nodes in self.suspended_services.items():
             [node.host.wait_success_resume_process(process_name) for node in list_nodes]
         self.suspended_services = {}
 
-    @reporter.step_deco("Drop traffic to {node}, with ports - {ports}, nodes - {block_nodes}")
+    @reporter.step("Drop traffic to {node} (ports: {ports}, nodes: {block_nodes})")
     def drop_traffic(
         self,
         mode: str,
@@ -327,7 +326,7 @@ class ClusterStateController:
             time.sleep(wakeup_timeout)
         self.dropped_traffic.append(node)
 
-    @reporter.step_deco("Ping traffic")
+    @reporter.step("Ping traffic")
     def ping_traffic(
         self,
         node: ClusterNode,
@@ -343,7 +342,7 @@ class ClusterStateController:
                 return False
         return True
 
-    @reporter.step_deco("Start traffic to {node}")
+    @reporter.step("Restore traffic to {node}")
     def restore_traffic(
         self,
         mode: str,
@@ -358,12 +357,12 @@ class ClusterStateController:
             case "nodes":
                 IpTablesHelper.restore_input_traffic_to_node(node=node)
 
-    @reporter.step_deco("Restore blocked nodes")
+    @reporter.step("Restore blocked nodes")
     def restore_all_traffic(self):
         parallel(self._restore_traffic_to_node, self.dropped_traffic)
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Hard reboot host {node} via magic SysRq option")
+    @reporter.step("Hard reboot host {node} via magic SysRq option")
     def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True, startup_healthcheck: bool = True):
         shell = node.host.get_shell()
         shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"')
@@ -383,14 +382,14 @@ class ClusterStateController:
         if startup_healthcheck:
             self.wait_startup_healthcheck()
 
-    @reporter.step_deco("Down {interface} to {nodes}")
+    @reporter.step("Down {interface} on {nodes}")
     def down_interface(self, nodes: list[ClusterNode], interface: str):
         for node in nodes:
             if_up_down_helper.down_interface(node=node, interface=interface)
             assert if_up_down_helper.check_state(node=node, interface=interface) == "DOWN"
             self.nodes_with_modified_interface.append(node)
 
-    @reporter.step_deco("Up {interface} to {nodes}")
+    @reporter.step("Up {interface} on {nodes}")
     def up_interface(self, nodes: list[ClusterNode], interface: str):
         for node in nodes:
             if_up_down_helper.up_interface(node=node, interface=interface)
@@ -398,17 +397,17 @@ class ClusterStateController:
             if node in self.nodes_with_modified_interface:
                 self.nodes_with_modified_interface.remove(node)
 
-    @reporter.step_deco("Restore interface")
+    @reporter.step("Restore interfaces")
     def restore_interfaces(self):
         for node in self.nodes_with_modified_interface:
             if_up_down_helper.up_all_interface(node)
 
-    @reporter.step_deco("Get node time")
+    @reporter.step("Get node time")
     def get_node_date(self, node: ClusterNode) -> datetime:
         shell = node.host.get_shell()
         return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z")
 
-    @reporter.step_deco("Set node time to {in_date}")
+    @reporter.step("Set node time to {in_date}")
     def change_node_date(self, node: ClusterNode, in_date: datetime) -> None:
         shell = node.host.get_shell()
         shell.exec(f"date -s @{time.mktime(in_date.timetuple())}")
@@ -417,7 +416,7 @@ class ClusterStateController:
         with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"):
             assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1)
 
-    @reporter.step_deco(f"Restore time")
+    @reporter.step("Restore time")
     def restore_node_date(self, node: ClusterNode) -> None:
         shell = node.host.get_shell()
         now_time = datetime.datetime.now(datetime.timezone.utc)
@@ -425,14 +424,14 @@ class ClusterStateController:
         shell.exec(f"date -s @{time.mktime(now_time.timetuple())}")
         shell.exec("hwclock --systohc")
 
-    @reporter.step_deco("Change the synchronizer status to {status}")
+    @reporter.step("Change the synchronizer status to {status}")
     def set_sync_date_all_nodes(self, status: str):
         if status == "active":
             parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes)
             return
         parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes)
 
-    @reporter.step_deco("Set MaintenanceModeAllowed - {status}")
+    @reporter.step("Set MaintenanceModeAllowed - {status}")
     def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None:
         frostfs_adm = FrostfsAdm(
             shell=cluster_node.host.get_shell(),
@@ -441,7 +440,7 @@ class ClusterStateController:
         )
         frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}")
 
-    @reporter.step_deco("Set mode node to {status}")
+    @reporter.step("Set node mode to {status}")
     def set_mode_node(self, cluster_node: ClusterNode, wallet: str, status: str, await_tick: bool = True) -> None:
         rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint()
         control_endpoint = cluster_node.service(StorageNode).get_control_endpoint()
@@ -465,8 +464,7 @@ class ClusterStateController:
 
         self.check_node_status(status=status, wallet=wallet, cluster_node=cluster_node)
 
-    @wait_for_success(80, 8)
-    @reporter.step_deco("Check status node, status - {status}")
+    @wait_for_success(80, 8, title="Wait for storage status to become {status}")
     def check_node_status(self, status: str, wallet: str, cluster_node: ClusterNode):
         frostfs_cli = FrostfsCli(
             shell=self.shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
         )
@@ -537,13 +535,13 @@ class ClusterStateController:
                 interfaces.append(ip)
         return interfaces
 
-    @reporter.step_deco("Ping node")
+    @reporter.step("Ping node")
     def _ping_host(self, node: ClusterNode):
         options = CommandOptions(check=False)
         return self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code
 
     @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.ONLINE)
-    @reporter.step_deco("Waiting for {node} to go online")
+    @reporter.step("Waiting for {node} to go online")
     def _wait_for_host_online(self, node: ClusterNode):
         try:
             ping_result = self._ping_host(node)
@@ -555,7 +553,7 @@ class ClusterStateController:
             return HostStatus.OFFLINE
 
     @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.OFFLINE)
-    @reporter.step_deco("Waiting for {node} to go offline")
+    @reporter.step("Waiting for {node} to go offline")
     def _wait_for_host_offline(self, node: ClusterNode):
         try:
             ping_result = self._ping_host(node)
diff --git a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py
index 078d483..66f72d6 100644
--- a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py
+++ b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py
@@ -1,13 +1,11 @@
 from typing import Any
 
-from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib import reporter
 from frostfs_testlib.storage.cluster import ClusterNode
 from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController, StateManager
 from frostfs_testlib.storage.dataclasses.node_base import ServiceClass
 from frostfs_testlib.testing import parallel
 
-reporter = get_reporter()
-
 
 class ConfigStateManager(StateManager):
     def __init__(self, cluster_state_controller: ClusterStateController) -> None:
@@ -15,7 +13,7 @@ class ConfigStateManager(StateManager):
         self.services_with_changed_config: set[tuple[ClusterNode, ServiceClass]] = set()
         self.cluster = self.csc.cluster
 
-    @reporter.step_deco("Change configuration for {service_type} on all nodes")
+    @reporter.step("Change configuration for {service_type} on all nodes")
     def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any]):
         services = self.cluster.services(service_type)
         nodes = self.cluster.nodes(services)
@@ -25,7 +23,7 @@ class ConfigStateManager(StateManager):
         parallel([node.config(service_type).set for node in nodes], values=values)
         self.csc.start_services_of_type(service_type)
 
-    @reporter.step_deco("Change configuration for {service_type} on {node}")
+    @reporter.step("Change configuration for {service_type} on {node}")
     def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]):
         self.services_with_changed_config.add((node, service_type))
@@ -33,7 +31,7 @@ class ConfigStateManager(StateManager):
         node.config(service_type).set(values)
         self.csc.start_service_of_type(node, service_type)
 
-    @reporter.step_deco("Revert all configuration changes")
+    @reporter.step("Revert all configuration changes")
     def revert_all(self):
         if not self.services_with_changed_config:
             return
@@ -44,7 +42,7 @@ class ConfigStateManager(StateManager):
         self.csc.start_all_stopped_services()
 
     # TODO: parallel can't have multiple parallel_items :(
-    @reporter.step_deco("Revert all configuration {node_and_service}")
+    @reporter.step("Revert configuration for {node_and_service}")
     def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]):
         node, service_type = node_and_service
         self.csc.stop_service_of_type(node, service_type)
diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py
index 4b9ffc2..ace0214 100644
--- a/src/frostfs_testlib/storage/dataclasses/node_base.py
+++ b/src/frostfs_testlib/storage/dataclasses/node_base.py
@@ -4,16 +4,14 @@ from typing import Optional, TypedDict, TypeVar
 
 import yaml
 
+from frostfs_testlib import reporter
 from frostfs_testlib.hosting.config import ServiceConfig
 from frostfs_testlib.hosting.interfaces import Host
-from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.shell.interfaces import CommandResult
 from frostfs_testlib.storage.constants import ConfigAttributes
 from frostfs_testlib.testing.readable import HumanReadableABC
 from frostfs_testlib.utils import wallet_utils
 
-reporter = get_reporter()
-
 
 @dataclass
 class NodeBase(HumanReadableABC):
diff --git a/src/frostfs_testlib/testing/cluster_test_base.py b/src/frostfs_testlib/testing/cluster_test_base.py
index 0676813..49c6afd 100644
--- a/src/frostfs_testlib/testing/cluster_test_base.py
+++ b/src/frostfs_testlib/testing/cluster_test_base.py
@@ -1,7 +1,7 @@
 import time
 from typing import Optional
 
-from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib import reporter
 from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps import epoch
@@ -9,15 +9,13 @@ from frostfs_testlib.storage.cluster import Cluster
 from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
 from frostfs_testlib.utils import datetime_utils
 
-reporter = get_reporter()
-
 
 # To skip adding every mandatory singleton dependency to EACH test function
 class ClusterTestBase:
     shell: Shell
     cluster: Cluster
 
-    @reporter.step_deco("Tick {epochs_to_tick} epochs, wait {wait_block} block")
+    @reporter.step("Tick {epochs_to_tick} epochs, wait {wait_block} block")
     def tick_epochs(
         self,
         epochs_to_tick: int,
diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py
index e1dfcd1..41d52ab 100644
--- a/src/frostfs_testlib/utils/cli_utils.py
+++ b/src/frostfs_testlib/utils/cli_utils.py
@@ -19,10 +19,9 @@ from typing import Dict, List, TypedDict, Union
 
 import pexpect
 
-from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib import reporter
 from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetmapInfo
 
-reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
 
 COLOR_GREEN = "\033[92m"
 COLOR_OFF = "\033[0m"
@@ -65,9 +64,7 @@ def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str =
     return cmd.decode()
 
-def _attach_allure_log(
-    cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime
-) -> None:
+def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime) -> None:
     command_attachment = (
         f"COMMAND: '{cmd}'\n"
         f"OUTPUT:\n {output}\n"
diff --git a/src/frostfs_testlib/utils/env_utils.py b/src/frostfs_testlib/utils/env_utils.py
index 6b4fb40..3fdebe1 100644
--- a/src/frostfs_testlib/utils/env_utils.py
+++ b/src/frostfs_testlib/utils/env_utils.py
@@ -1,13 +1,12 @@
 import logging
 import re
 
-from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib import reporter
 
-reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
 
-@reporter.step_deco("Read environment.properties")
+@reporter.step("Read environment.properties")
 def read_env_properties(file_path: str) -> dict:
     with open(file_path, "r") as file:
         raw_content = file.read()
@@ -23,7 +22,7 @@ def read_env_properties(file_path: str) -> dict:
     return env_properties
 
-@reporter.step_deco("Update data in environment.properties")
+@reporter.step("Update data in environment.properties")
 def save_env_properties(file_path: str, env_data: dict) -> None:
     with open(file_path, "a+") as env_file:
         for env, env_value in env_data.items():
diff --git a/src/frostfs_testlib/utils/failover_utils.py b/src/frostfs_testlib/utils/failover_utils.py
index 507168e..5c4d52f 100644
--- a/src/frostfs_testlib/utils/failover_utils.py
+++ b/src/frostfs_testlib/utils/failover_utils.py
@@ -3,7 +3,7 @@ from dataclasses import dataclass
 from time import sleep
 from typing import Optional
 
-from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib import reporter
 from frostfs_testlib.resources.common import SERVICE_MAX_STARTUP_TIME
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.object import neo_go_dump_keys
@@ -15,12 +15,10 @@ from frostfs_testlib.storage.dataclasses.node_base import ServiceClass
 from frostfs_testlib.testing.test_control import wait_for_success
 from frostfs_testlib.utils.datetime_utils import parse_time
 
-reporter = get_reporter()
-
 logger = logging.getLogger("NeoLogger")
 
-@reporter.step_deco("Check and return status of given service")
+@reporter.step("Check and return status of given service")
 def service_status(service: str, shell: Shell) -> str:
     return shell.exec(f"sudo systemctl is-active {service}").stdout.rstrip()
@@ -73,14 +71,14 @@ class TopCommand:
     )
 
-@reporter.step_deco("Run `top` command with specified PID")
+@reporter.step("Run `top` command with specified PID")
 def service_status_top(service: str, shell: Shell) -> TopCommand:
     pid = service_pid(service, shell)
     output = shell.exec(f"sudo top -b -n 1 -p {pid}").stdout
     return TopCommand.from_stdout(output, pid)
 
-@reporter.step_deco("Restart service n times with sleep")
+@reporter.step("Restart service n times with sleep")
 def multiple_restart(
     service_type: type[NodeBase],
     node: ClusterNode,
@@ -95,8 +93,7 @@ def multiple_restart(
         sleep(sleep_interval)
 
-@reporter.step_deco("Get status of list of services and check expected status")
-@wait_for_success(60, 5)
+@wait_for_success(60, 5, title="Wait for services to become {expected_status} on node {cluster_node}")
 def check_services_status(cluster_node: ClusterNode, service_list: list[ServiceClass], expected_status: str):
     cmd = ""
     for service in service_list:
@@ -112,8 +109,7 @@ def check_services_status(cluster_node: ClusterNode, service_list: list[ServiceC
     ), f"Requested status={expected_status} not found in requested services={service_list}, list of statuses={result}"
 
-@reporter.step_deco("Wait for active status of passed service")
-@wait_for_success(60, 5)
+@wait_for_success(60, 5, title="Wait for {service} to become active")
 def wait_service_in_desired_state(service: str, shell: Shell, expected_status: Optional[str] = "active"):
     real_status = service_status(service=service, shell=shell)
     assert (
@@ -121,8 +117,7 @@ def wait_service_in_desired_state(service: str, shell: Shell, expected_status: O
     ), f"Service {service}: expected status= {expected_status}, real status {real_status}"
 
-@reporter.step_deco("Run healthcheck against passed service")
-@wait_for_success(parse_time(SERVICE_MAX_STARTUP_TIME), 1)
+@wait_for_success(parse_time(SERVICE_MAX_STARTUP_TIME), 1, title="Wait for {service_type} to pass healthcheck on {node}")
 def service_type_healthcheck(
     service_type: type[NodeBase],
     node: ClusterNode,
@@ -133,26 +128,25 @@ def service_type_healthcheck(
     ), f"Healthcheck failed for {service.get_service_systemctl_name()}, IP={node.host_ip}"
 
-@reporter.step_deco("Kill by process name")
+@reporter.step("Kill by process name")
 def kill_by_service_name(service_type: type[NodeBase], node: ClusterNode):
     service_systemctl_name = node.service(service_type).get_service_systemctl_name()
     pid = service_pid(service_systemctl_name, node.host.get_shell())
     node.host.get_shell().exec(f"sudo kill -9 {pid}")
 
-@reporter.step_deco("Service {service} suspend")
+@reporter.step("Suspend {service}")
 def suspend_service(shell: Shell, service: str):
     shell.exec(f"sudo kill -STOP {service_pid(service, shell)}")
 
-@reporter.step_deco("Service {service} resume")
+@reporter.step("Resume {service}")
 def resume_service(shell: Shell, service: str):
     shell.exec(f"sudo kill -CONT {service_pid(service, shell)}")
 
-@reporter.step_deco("Retrieve service's pid")
 # retry mechanism cause when the task has been started recently '0' PID could be returned
-@wait_for_success(10, 1)
+@wait_for_success(10, 1, title="Get {service} PID")
 def service_pid(service: str, shell: Shell) -> int:
     output = shell.exec(f"systemctl show --property MainPID {service}").stdout.rstrip()
     splitted = output.split("=")
@@ -161,7 +155,7 @@ def service_pid(service: str, shell: Shell) -> int:
     return PID
 
-@reporter.step_deco("Wrapper for neo-go dump keys command")
+@reporter.step("Wrapper for neo-go dump keys command")
 def dump_keys(shell: Shell, node: ClusterNode) -> dict:
     host = node.host
     service_config = host.get_service_config(node.service(MorphChain).name)
@@ -169,7 +163,7 @@ def dump_keys(shell: Shell, node: ClusterNode) -> dict:
     return neo_go_dump_keys(shell=shell, wallet=wallet)
 
-@reporter.step_deco("Wait for object replication")
+@reporter.step("Wait for object replication")
 def wait_object_replication(
     cid: str,
     oid: str,
diff --git a/src/frostfs_testlib/utils/file_keeper.py b/src/frostfs_testlib/utils/file_keeper.py
index ad6836b..a5670cc 100644
--- a/src/frostfs_testlib/utils/file_keeper.py
+++ b/src/frostfs_testlib/utils/file_keeper.py
@@ -1,17 +1,15 @@
 from concurrent.futures import ThreadPoolExecutor
 
-from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib import reporter
 from frostfs_testlib.storage.dataclasses.node_base import NodeBase
 
-reporter = get_reporter()
-
 
 class FileKeeper:
     """This class is responsible to make backup copy of modified file and restore when required (mostly after the test)"""
 
     files_to_restore: dict[NodeBase, list[str]] = {}
 
-    @reporter.step_deco("Adding {file_to_restore} from node {node} to restore list")
+    @reporter.step("Add {file_to_restore} from node {node} to restore list")
     def add(self, node: NodeBase, file_to_restore: str):
         if node in self.files_to_restore and file_to_restore in self.files_to_restore[node]:
             # Already added
             return
@@ -26,7 +24,7 @@ class FileKeeper:
         shell = node.host.get_shell()
         shell.exec(f"cp {file_to_restore} {file_to_restore}.bak")
 
-    @reporter.step_deco("Restore files")
+    @reporter.step("Restore files")
     def restore_files(self):
         nodes = self.files_to_restore.keys()
         if not nodes:
             return
@@ -41,7 +39,7 @@ class FileKeeper:
             # Iterate through results for exception check if any
             pass
 
-    @reporter.step_deco("Restore files on node {node}")
+    @reporter.step("Restore files on node {node}")
     def _restore_files_on_node(self, node: NodeBase):
         shell = node.host.get_shell()
         for file_to_restore in self.files_to_restore[node]:
diff --git a/src/frostfs_testlib/utils/file_utils.py b/src/frostfs_testlib/utils/file_utils.py
index a41665e..d238106 100644
--- a/src/frostfs_testlib/utils/file_utils.py
+++ b/src/frostfs_testlib/utils/file_utils.py
@@ -4,10 +4,9 @@ import os
 import uuid
 from typing import Any, Optional
 
-from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib import reporter
 from frostfs_testlib.resources.common import ASSETS_DIR
 
-reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
 
@@ -61,7 +60,7 @@ def generate_file_with_content(
     return file_path
 
-@reporter.step_deco("Get File Hash")
+@reporter.step("Get File Hash")
 def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str:
     """Generates hash for the specified file.
 
@@ -88,7 +87,7 @@ def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[in
     return file_hash.hexdigest()
 
-@reporter.step_deco("Concatenation set of files to one file")
+@reporter.step("Concatenate a set of files into one file")
 def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str:
     """Concatenates several files into a single file.
-- 
2.45.2
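
Note for downstream test suites applying the same migration: the pattern in this
patch is mechanical. Import the reporter module directly instead of calling
get_reporter(), replace @reporter.step_deco(...) with @reporter.step(...), and,
for polling helpers, fold the step title into wait_for_success(...) rather than
stacking a separate step decorator on top of it. A minimal Python sketch follows;
the helpers probe_node/wait_probe and their titles are hypothetical examples,
not part of this patch or of the testlib API.

# Sketch only: before/after of the reporter-usage migration in this patch.
from frostfs_testlib import reporter
from frostfs_testlib.testing.test_control import wait_for_success

# Old pattern (removed throughout this patch):
#     from frostfs_testlib.reporter import get_reporter
#     reporter = get_reporter()
#
#     @reporter.step_deco("Probe node")
#     @wait_for_success(60, 5)
#     def probe_node(node): ...

# New pattern: decorate with reporter.step directly; placeholders such as
# {node} are filled from the decorated function's arguments, as with
# step_deco before.
@reporter.step("Probe {node}")
def probe_node(node) -> bool:
    return True  # placeholder body for the sketch

# When the step wraps a polling helper, pass the title to wait_for_success
# so the retry loop and the reported step are a single unit.
@wait_for_success(60, 5, title="Wait for probe of {node}")
def wait_probe(node) -> bool:
    return probe_node(node)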