From 584ba5f0d1bc9b00ffffc3c93f2ad7a7089b5304 Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Wed, 31 May 2023 16:02:26 +0300
Subject: [PATCH 001/274] Update shards list command

Signed-off-by: Andrey Berezin
---
 src/frostfs_testlib/cli/frostfs_cli/shards.py             | 4 ++--
 src/frostfs_testlib/storage/controllers/shards_watcher.py | 1 +
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py
index 6b47ac2..ffa0652 100644
--- a/src/frostfs_testlib/cli/frostfs_cli/shards.py
+++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py
@@ -118,7 +118,7 @@ class FrostfsCliShards(CliCommand):
         wallet: str,
         wallet_password: str,
         address: Optional[str] = None,
-        json_mode: bool = False,
+        json: bool = False,
         timeout: Optional[str] = None,
     ) -> CommandResult:
         """
@@ -126,7 +126,7 @@ class FrostfsCliShards(CliCommand):
 
         Args:
             address: Address of wallet account.
-            json_mode: Print shard info as a JSON array.
+            json: Print shard info as a JSON array.
             endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             wallet_password: Wallet password.
diff --git a/src/frostfs_testlib/storage/controllers/shards_watcher.py b/src/frostfs_testlib/storage/controllers/shards_watcher.py
index 6607824..bd7c8cd 100644
--- a/src/frostfs_testlib/storage/controllers/shards_watcher.py
+++ b/src/frostfs_testlib/storage/controllers/shards_watcher.py
@@ -99,6 +99,7 @@ class ShardsWatcher:
             endpoint=self.storage_node.get_control_endpoint(),
             wallet=self.storage_node.get_remote_wallet_path(),
             wallet_password=self.storage_node.get_wallet_password(),
+            json=True,
         )
         return json.loads(response.stdout.split(">", 1)[1])

From 3cb2f28ef55cfe06357ba5003caca188a9fd5bec Mon Sep 17 00:00:00 2001
From: sstovbyra
Date: Wed, 31 May 2023 14:09:05 +0300
Subject: [PATCH 002/274] Update version_utils.py: add new binaries to check

---
 src/frostfs_testlib/utils/version_utils.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py
index 68f8578..26fedf5 100644
--- a/src/frostfs_testlib/utils/version_utils.py
+++ b/src/frostfs_testlib/utils/version_utils.py
@@ -1,5 +1,6 @@
 import logging
+import os
 import re
 
 from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
 from frostfs_testlib.hosting import Hosting
@@ -18,7 +19,10 @@ logger = logging.getLogger("NeoLogger")
 
 def get_local_binaries_versions(shell: Shell) -> dict[str, str]:
     versions = {}
 
-    for binary in [NEOGO_EXECUTABLE, FROSTFS_AUTHMATE_EXEC]:
+    # Extra binaries to get version from (empty names filtered out)
+    extra_binaries = [binary for binary in os.getenv("EXTRA_BINARIES", "").split(",") if binary]
+
+    for binary in [NEOGO_EXECUTABLE, FROSTFS_AUTHMATE_EXEC, *extra_binaries]:
         out = shell.exec(f"{binary} --version").stdout
         versions[binary] = _parse_version(out)
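The EXTRA_BINARIES hook in PATCH 002 is driven entirely by an environment variable. A minimal usage sketch follows; LocalShell as the Shell implementation and the binary names are illustrative assumptions, not part of the patch:

    import os

    # Hypothetical extra binaries to include in the version report
    os.environ["EXTRA_BINARIES"] = "frostfs-cli,frostfs-adm"

    from frostfs_testlib.shell import LocalShell
    from frostfs_testlib.utils.version_utils import get_local_binaries_versions

    # Reports versions for the default binaries plus the two extras above
    print(get_local_binaries_versions(LocalShell()))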
---
 src/frostfs_testlib/cli/frostfs_cli/shards.py             | 4 ++--
 src/frostfs_testlib/storage/controllers/shards_watcher.py | 1 -
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py
index ffa0652..6b47ac2 100644
--- a/src/frostfs_testlib/cli/frostfs_cli/shards.py
+++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py
@@ -118,7 +118,7 @@ class FrostfsCliShards(CliCommand):
         wallet: str,
         wallet_password: str,
         address: Optional[str] = None,
-        json: bool = False,
+        json_mode: bool = False,
         timeout: Optional[str] = None,
     ) -> CommandResult:
         """
@@ -126,7 +126,7 @@ class FrostfsCliShards(CliCommand):
 
         Args:
             address: Address of wallet account.
-            json: Print shard info as a JSON array.
+            json_mode: Print shard info as a JSON array.
             endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             wallet_password: Wallet password.
diff --git a/src/frostfs_testlib/storage/controllers/shards_watcher.py b/src/frostfs_testlib/storage/controllers/shards_watcher.py
index bd7c8cd..6607824 100644
--- a/src/frostfs_testlib/storage/controllers/shards_watcher.py
+++ b/src/frostfs_testlib/storage/controllers/shards_watcher.py
@@ -99,7 +99,6 @@ class ShardsWatcher:
             endpoint=self.storage_node.get_control_endpoint(),
             wallet=self.storage_node.get_remote_wallet_path(),
             wallet_password=self.storage_node.get_wallet_password(),
-            json=True,
         )
         return json.loads(response.stdout.split(">", 1)[1])

From e3c0f768960c30441ee74fe90b1215fe3ef63dce Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Fri, 2 Jun 2023 13:08:17 +0300
Subject: [PATCH 004/274] Proper usage for shards_watcher

Signed-off-by: Andrey Berezin
---
 src/frostfs_testlib/storage/controllers/shards_watcher.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/frostfs_testlib/storage/controllers/shards_watcher.py b/src/frostfs_testlib/storage/controllers/shards_watcher.py
index 6607824..95a419e 100644
--- a/src/frostfs_testlib/storage/controllers/shards_watcher.py
+++ b/src/frostfs_testlib/storage/controllers/shards_watcher.py
@@ -99,6 +99,7 @@ class ShardsWatcher:
             endpoint=self.storage_node.get_control_endpoint(),
             wallet=self.storage_node.get_remote_wallet_path(),
             wallet_password=self.storage_node.get_wallet_password(),
+            json_mode=True,
         )
         return json.loads(response.stdout.split(">", 1)[1])

From e9777b63cde2175f585ba52ff0771782d8e69a1d Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Fri, 2 Jun 2023 15:56:23 +0300
Subject: [PATCH 005/274] update allure in requirements.txt

Signed-off-by: Andrey Berezin
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index c653f7b..5b47640 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-allure-python-commons==2.9.45
+allure-python-commons==2.13.2
 docker==4.4.0
 importlib_metadata==5.0.0
 neo-mamba==1.0.0
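Patches 001-004 settle on json_mode as the public flag name for FrostfsCliShards.list, with ShardsWatcher as its consumer. A usage sketch mirroring the shards_watcher code above; shards_cli is assumed to be a configured FrostfsCliShards instance and node a StorageNode:

    import json

    response = shards_cli.list(
        endpoint=node.get_control_endpoint(),
        wallet=node.get_remote_wallet_path(),
        wallet_password=node.get_wallet_password(),
        json_mode=True,
    )
    # The CLI echoes the invocation before the JSON payload, hence the split on ">"
    shards = json.loads(response.stdout.split(">", 1)[1])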
From 26a78c0eae3d0350f2714c128f36fa15a70f90ff Mon Sep 17 00:00:00 2001
From: Dmitriy Zayakin
Date: Mon, 5 Jun 2023 12:00:06 +0300
Subject: [PATCH 006/274] New methods with nodes

Signed-off-by: Dmitriy Zayakin
---
 .../cli/frostfs_cli/container.py              | 42 +++++++++++++++++++
 .../controllers/cluster_state_controller.py   |  0
 src/frostfs_testlib/hosting/docker_host.py    |  6 +++
 src/frostfs_testlib/hosting/interfaces.py     | 14 +++++++
 src/frostfs_testlib/steps/cli/container.py    | 27 +++++++++++-
 src/frostfs_testlib/steps/http/http_gate.py   | 28 +++++++++----
 src/frostfs_testlib/steps/s3/s3_helper.py     | 22 +++++++++-
 src/frostfs_testlib/storage/cluster.py        | 11 +++++
 .../controllers/cluster_state_controller.py   | 26 ++++++++++++
 .../storage/dataclasses/node_base.py          |  4 ++
 10 files changed, 170 insertions(+), 10 deletions(-)
 create mode 100644 src/frostfs_testlib/controllers/cluster_state_controller.py

diff --git a/src/frostfs_testlib/cli/frostfs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py
index 533ff1a..5ea8ba8 100644
--- a/src/frostfs_testlib/cli/frostfs_cli/container.py
+++ b/src/frostfs_testlib/cli/frostfs_cli/container.py
@@ -262,3 +262,45 @@ class FrostfsCliContainer(CliCommand):
             "container set-eacl",
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
+
+    def search_node(
+        self,
+        rpc_endpoint: str,
+        wallet: str,
+        cid: str,
+        address: Optional[str] = None,
+        ttl: Optional[int] = None,
+        from_file: Optional[str] = None,
+        short: Optional[bool] = True,
+        xhdr: Optional[dict] = None,
+        generate_key: Optional[bool] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """
+        Show the nodes participating in the container in the current epoch.
+
+        Args:
+            rpc_endpoint: string Remote host address (as 'multiaddr' or '<host>:<port>')
+            wallet: WIF (NEP-2) string or path to the wallet or binary key.
+            cid: Container ID.
+            address: Address of wallet account.
+            ttl: TTL value in request meta header (default 2).
+            from_file: string File path with encoded container
+            timeout: duration Timeout for the operation (default 15 s)
+            short: shorten the output of node information.
+            xhdr: Dict with request X-Headers.
+            generate_key: Generate a new private key
+
+        Returns:
+
+        """
+        from_str = f"--from {from_file}" if from_file else ""
+
+        return self._execute(
+            f"container nodes {from_str}",
+            **{
+                param: value
+                for param, value in locals().items()
+                if param not in ["self", "from_file", "from_str"]
+            },
+        )
diff --git a/src/frostfs_testlib/controllers/cluster_state_controller.py b/src/frostfs_testlib/controllers/cluster_state_controller.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py
index b7f4852..1e32340 100644
--- a/src/frostfs_testlib/hosting/docker_host.py
+++ b/src/frostfs_testlib/hosting/docker_host.py
@@ -117,6 +117,12 @@ class DockerHost(Host):
             timeout=service_attributes.stop_timeout,
         )
 
+    def wait_success_suspend_process(self, service_name: str):
+        raise NotImplementedError("Not supported for docker")
+
+    def wait_success_resume_process(self, service_name: str):
+        raise NotImplementedError("Not supported for docker")
+
     def restart_service(self, service_name: str) -> None:
         service_attributes = self._get_service_attributes(service_name)
 
diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py
index 9178523..95536c6 100644
--- a/src/frostfs_testlib/hosting/interfaces.py
+++ b/src/frostfs_testlib/hosting/interfaces.py
@@ -112,6 +112,20 @@ class Host(ABC):
             service_name: Name of the service to restart.
         """
 
+    @abstractmethod
+    def wait_success_suspend_process(self, process_name: str) -> None:
+        """Search for a process by name and wait for it to be suspended successfully.
+        Args:
+            process_name: Process name.
+        """
+
+    @abstractmethod
+    def wait_success_resume_process(self, process_name: str) -> None:
+        """Search for a process by name and wait for it to be resumed successfully.
+        Args:
+            process_name: Process name.
+        """
+
     @abstractmethod
     def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None:
         """Erases all data of the storage node with specified name.
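The new FrostfsCliContainer.search_node wrapper shells out to `frostfs-cli container nodes`; the search_nodes_with_container helper in the next hunk builds on it by scraping node IPs from its output. A hypothetical direct call (the endpoint and wallet path are placeholders):

    from frostfs_testlib.cli import FrostfsCli
    from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
    from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG

    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
    result = cli.container.search_node(
        rpc_endpoint="s01.frostfs.devenv:8080",  # placeholder endpoint
        wallet="/path/to/wallet.json",           # placeholder wallet path
        cid=cid,
        timeout=CLI_DEFAULT_TIMEOUT,
    )
    print(result.stdout)  # one line per participating node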
""" + @abstractmethod + def wait_success_suspend_process(self, process_name: str) -> None: + """Search for a service ID by its name and stop the process + Args: + process_name: Name + """ + + @abstractmethod + def wait_success_resume_process(self, process_name: str) -> None: + """Search for a service by its ID and start the process + Args: + process_name: Name + """ + @abstractmethod def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: """Erases all data of the storage node with specified name. diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index 89070c4..74f445a 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -1,5 +1,6 @@ import json import logging +import re from dataclasses import dataclass from time import sleep from typing import Optional, Union @@ -10,7 +11,7 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node -from frostfs_testlib.storage.cluster import Cluster +from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.utils import json_utils @@ -357,3 +358,27 @@ def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str if cont_info.get("attributes", {}).get("Name", None) == name: return cid return None + + +@reporter.step_deco("Search for nodes with a container") +def search_nodes_with_container( + wallet: str, + cid: str, + shell: Shell, + endpoint: str, + cluster: Cluster, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> list[ClusterNode]: + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + result = cli.container.search_node( + rpc_endpoint=endpoint, wallet=wallet, cid=cid, timeout=timeout + ) + + pattern = r"[0-9]+(?:\.[0-9]+){3}" + nodes_ip = list(set(re.findall(pattern, result.stdout))) + + with reporter.step(f"nodes ips = {nodes_ip}"): + nodes_list = cluster.get_nodes_by_ip(nodes_ip) + + with reporter.step(f"Return nodes - {nodes_list}"): + return nodes_list diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py index c9769fb..64bb5ce 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -28,7 +28,13 @@ ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/") @reporter.step_deco("Get via HTTP Gate") -def get_via_http_gate(cid: str, oid: str, endpoint: str, request_path: Optional[str] = None): +def get_via_http_gate( + cid: str, + oid: str, + endpoint: str, + request_path: Optional[str] = None, + timeout: Optional[int] = 300, +): """ This function gets given object from HTTP gate cid: container id to get object from @@ -43,7 +49,7 @@ def get_via_http_gate(cid: str, oid: str, endpoint: str, request_path: Optional[ else: request = f"{endpoint}{request_path}" - resp = requests.get(request, stream=True) + resp = requests.get(request, stream=True, timeout=timeout) if not resp.ok: raise Exception( @@ -63,7 +69,7 @@ def get_via_http_gate(cid: str, oid: str, endpoint: str, request_path: Optional[ @reporter.step_deco("Get via Zip HTTP Gate") -def get_via_zip_http_gate(cid: str, prefix: str, endpoint: 
diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py
index c9769fb..64bb5ce 100644
--- a/src/frostfs_testlib/steps/http/http_gate.py
+++ b/src/frostfs_testlib/steps/http/http_gate.py
@@ -28,7 +28,13 @@ ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
 
 
 @reporter.step_deco("Get via HTTP Gate")
-def get_via_http_gate(cid: str, oid: str, endpoint: str, request_path: Optional[str] = None):
+def get_via_http_gate(
+    cid: str,
+    oid: str,
+    endpoint: str,
+    request_path: Optional[str] = None,
+    timeout: Optional[int] = 300,
+):
     """
     This function gets given object from HTTP gate
     cid:      container id to get object from
@@ -43,7 +49,7 @@ def get_via_http_gate(cid: str, oid: str, endpoint: str, request_path: Optional
     else:
         request = f"{endpoint}{request_path}"
 
-    resp = requests.get(request, stream=True)
+    resp = requests.get(request, stream=True, timeout=timeout)
 
     if not resp.ok:
         raise Exception(
@@ -63,7 +69,7 @@ def get_via_http_gate(cid: str, oid: str, endpoint: str, request_path: Optional
 
 
 @reporter.step_deco("Get via Zip HTTP Gate")
-def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str):
+def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, timeout: Optional[int] = 300):
     """
     This function gets given object from HTTP gate
     cid:      container id to get object from
@@ -71,7 +77,7 @@ def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str):
     endpoint: http gate endpoint
     """
     request = f"{endpoint}/zip/{cid}/{prefix}"
-    resp = requests.get(request, stream=True)
+    resp = requests.get(request, stream=True, timeout=timeout)
 
     if not resp.ok:
         raise Exception(
@@ -96,7 +102,11 @@ def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str):
 
 @reporter.step_deco("Get via HTTP Gate by attribute")
 def get_via_http_gate_by_attribute(
-    cid: str, attribute: dict, endpoint: str, request_path: Optional[str] = None
+    cid: str,
+    attribute: dict,
+    endpoint: str,
+    request_path: Optional[str] = None,
+    timeout: Optional[int] = 300,
 ):
     """
     This function gets given object from HTTP gate
@@ -113,7 +123,7 @@ def get_via_http_gate_by_attribute(
     else:
         request = f"{endpoint}{request_path}"
 
-    resp = requests.get(request, stream=True)
+    resp = requests.get(request, stream=True, timeout=timeout)
 
     if not resp.ok:
         raise Exception(
@@ -133,7 +143,9 @@ def get_via_http_gate_by_attribute(
 
 
 @reporter.step_deco("Upload via HTTP Gate")
-def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None) -> str:
+def upload_via_http_gate(
+    cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300
+) -> str:
     """
     This function upload given object through HTTP gate
     cid: CID to get object from
@@ -144,7 +156,7 @@ def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[d
     request = f"{endpoint}/upload/{cid}"
     files = {"upload_file": open(path, "rb")}
    body = {"filename": path}
-    resp = requests.post(request, files=files, data=body, headers=headers)
+    resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout)
 
     if not resp.ok:
         raise Exception(
diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py
index 87f929e..0c6c448 100644
--- a/src/frostfs_testlib/steps/s3/s3_helper.py
+++ b/src/frostfs_testlib/steps/s3/s3_helper.py
@@ -12,7 +12,12 @@ from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
 from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT
 from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
-from frostfs_testlib.storage.cluster import Cluster
+from frostfs_testlib.shell import Shell
+from frostfs_testlib.steps.cli.container import (
+    search_container_by_name,
+    search_nodes_with_container,
+)
+from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
 from frostfs_testlib.utils.cli_utils import _run_with_passwd
 
@@ -245,3 +250,18 @@ def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str):
 
     # Delete the bucket itself
     s3_client.delete_bucket(bucket)
+
+
+@reporter.step_deco("Search nodes bucket")
+def search_nodes_with_bucket(
+    cluster: Cluster,
+    bucket_name: str,
+    wallet: str,
+    shell: Shell,
+    endpoint: str,
+) -> list[ClusterNode]:
+    cid = search_container_by_name(wallet=wallet, name=bucket_name, shell=shell, endpoint=endpoint)
+    nodes_list = search_nodes_with_container(
+        wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster
+    )
+    return nodes_list
diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py
index 2158dc2..91487c9 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -2,9 +2,11 @@ import random import re import yaml +from yarl import URL from frostfs_testlib.hosting import Host, Hosting from frostfs_testlib.hosting.config import ServiceConfig +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.storage import get_service_registry from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.storage.dataclasses.frostfs_services import ( @@ -17,6 +19,8 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import ( from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.storage.service_registry import ServiceRegistry +reporter = get_reporter() + class ClusterNode: """ @@ -250,3 +254,10 @@ class Cluster: def get_morph_endpoints(self) -> list[str]: nodes: list[MorphChain] = self.services(MorphChain) return [node.get_endpoint() for node in nodes] + + def get_nodes_by_ip(self, ips: list[str]) -> list[ClusterNode]: + cluster_nodes = [ + node for node in self.cluster_nodes if URL(node.morph_chain.get_endpoint()).host in ips + ] + with reporter.step(f"Return cluster nodes - {cluster_nodes}"): + return cluster_nodes diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 705caf0..70f3e21 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -25,6 +25,7 @@ class ClusterStateController: self.stopped_storage_nodes: list[ClusterNode] = [] self.cluster = cluster self.shell = shell + self.suspended_services: dict[str, list[ClusterNode]] = {} @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop host of node {node}") @@ -130,6 +131,31 @@ class ClusterStateController: wait_all_storage_nodes_returned(self.shell, self.cluster) self.stopped_storage_nodes = [] + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Suspend {process_name} service in {node}") + def suspend_service(self, process_name: str, node: ClusterNode): + node.host.wait_success_suspend_process(process_name) + if self.suspended_services.get(process_name): + self.suspended_services[process_name].append(node) + else: + self.suspended_services[process_name] = [node] + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Resume {process_name} service in {node}") + def resume_service(self, process_name: str, node: ClusterNode): + node.host.wait_success_resume_process(process_name) + if self.suspended_services.get(process_name): + self.suspended_services[process_name].append(node) + else: + self.suspended_services[process_name] = [node] + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start suspend processes services") + def resume_suspended_services(self): + for process_name, list_nodes in self.suspended_services.items(): + [node.host.wait_success_resume_process(process_name) for node in list_nodes] + self.suspended_services = {} + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Hard reboot host {node} via magic SysRq option") def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True): diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 8fcb03b..150b963 100644 --- 
a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -19,6 +19,7 @@ class NodeBase(ABC): id: str name: str host: Host + _process_name: str def __init__(self, id, name, host) -> None: self.id = id @@ -48,6 +49,9 @@ class NodeBase(ABC): def get_service_systemctl_name(self) -> str: return self._get_attribute(ConfigAttributes.SERVICE_NAME) + def get_process_name(self) -> str: + return self._process_name + def start_service(self): self.host.start_service(self.name) From f2f3d3c8e3814ababf38158a97d94cfad0d386a5 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Mon, 5 Jun 2023 12:10:32 +0300 Subject: [PATCH 007/274] Add get_data_directory function --- src/frostfs_testlib/hosting/docker_host.py | 4 ++++ src/frostfs_testlib/hosting/interfaces.py | 12 ++++++++++++ .../storage/dataclasses/frostfs_services.py | 3 +++ 3 files changed, 19 insertions(+) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 1e32340..5dcac9e 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -135,6 +135,10 @@ class DockerHost(Host): timeout=service_attributes.start_timeout, ) + def get_data_directory(self, service_name: str) -> str: + service_attributes = self._get_service_attributes(service_name) + return service_attributes.data_directory_path + def delete_metabase(self, service_name: str) -> None: raise NotImplementedError("Not implemented for docker") diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 95536c6..8d889da 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -112,6 +112,18 @@ class Host(ABC): service_name: Name of the service to restart. """ + + @abstractmethod + def get_data_directory(self, service_name: str) -> str: + """ + Getting path to data directory on node for further usage + (example: list databases pilorama.db) + + Args: + service_name: Name of storage node service. 
+ """ + + @abstractmethod def wait_success_suspend_process(self, process_name: str) -> None: """Search for a service ID by its name and stop the process diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 7bb4c2b..2b52c1f 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -167,6 +167,9 @@ class StorageNode(NodeBase): def get_un_locode(self): return self._get_attribute(ConfigAttributes.UN_LOCODE) + + def get_data_directory(self) -> str: + return self.host.get_data_directory(self.name) def delete_blobovnicza(self): self.host.delete_blobovnicza(self.name) From 98f5075715123cfd62d726e49d09c5d5746ff5f2 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Tue, 13 Jun 2023 12:07:21 +0300 Subject: [PATCH 008/274] Functions for stop/start s3 gateway in cluster_state_controller --- .../controllers/cluster_state_controller.py | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 70f3e21..1084552 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -23,6 +23,7 @@ class ClusterStateController: self.stopped_nodes: list[ClusterNode] = [] self.detached_disks: dict[str, DiskController] = {} self.stopped_storage_nodes: list[ClusterNode] = [] + self.stopped_s3_gate: list[ClusterNode] = [] self.cluster = cluster self.shell = shell self.suspended_services: dict[str, list[ClusterNode]] = {} @@ -131,6 +132,28 @@ class ClusterStateController: wait_all_storage_nodes_returned(self.shell, self.cluster) self.stopped_storage_nodes = [] + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop s3 gate on {node}") + def stop_s3_gate(self, node: ClusterNode): + node.s3_gate.stop_service() + self.stopped_s3_gate.append(node) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start s3 gate on {node}") + def start_s3_gate(self, node: ClusterNode): + node.s3_gate.start_service() + self.stopped_s3_gate.remove(node) + + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start stopped S3 gates") + def start_stopped_s3_gate(self): + # not sure if we need here to use threads like in start_stopped_storage_services + for s3_gate in self.stopped_s3_gate: + s3_gate.start_service() + self.stopped_s3_gate = [] + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Suspend {process_name} service in {node}") def suspend_service(self, process_name: str, node: ClusterNode): From c0f63e378354f643cccaea2fdf97a15cd7058c95 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 19 Jun 2023 13:39:35 +0300 Subject: [PATCH 009/274] New methods S3 client Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/s3/aws_cli_client.py | 4 ++++ src/frostfs_testlib/s3/boto3_client.py | 24 ++++++++++++++++++------ src/frostfs_testlib/s3/interfaces.py | 4 ++++ 3 files changed, 26 insertions(+), 6 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 054a1e8..a9aeb37 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -39,6 +39,10 @@ class AwsCliClient(S3ClientWrapper): except Exception as err: raise 
RuntimeError("Error while configuring AwsCliClient") from err + @reporter.step_deco("Set endpoint S3 to {s3gate_endpoint}") + def set_endpoint(self, s3gate_endpoint: str): + self.s3gate_endpoint = s3gate_endpoint + @reporter.step_deco("Create bucket S3") def create_bucket( self, diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 07c693f..6d6fc74 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -47,19 +47,31 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step_deco("Configure S3 client (boto3)") @report_error def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: - session = boto3.Session() - config = Config( + self.boto3_client: S3Client = None + self.session = boto3.Session() + self.config = Config( retries={ "max_attempts": MAX_REQUEST_ATTEMPTS, "mode": RETRY_MODE, } ) + self.access_key_id: str = access_key_id + self.secret_access_key: str = secret_access_key + self.s3gate_endpoint: str = "" + self.set_endpoint(s3gate_endpoint) - self.boto3_client: S3Client = session.client( + @reporter.step_deco("Set endpoint S3 to {s3gate_endpoint}") + def set_endpoint(self, s3gate_endpoint: str): + if self.s3gate_endpoint == s3gate_endpoint: + return + + self.s3gate_endpoint = s3gate_endpoint + + self.boto3_client: S3Client = self.session.client( service_name="s3", - aws_access_key_id=access_key_id, - aws_secret_access_key=secret_access_key, - config=config, + aws_access_key_id=self.access_key_id, + aws_secret_access_key=self.secret_access_key, + config=self.config, endpoint_url=s3gate_endpoint, verify=False, ) diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index bd1379c..3f31395 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -34,6 +34,10 @@ class S3ClientWrapper(ABC): def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: pass + @abstractmethod + def set_endpoint(self, s3gate_endpoint: str): + """Set endpoint""" + @abstractmethod def create_bucket( self, From 13ea25bff5252a24b4cbda21c458260367bfc8b6 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Wed, 21 Jun 2023 13:02:16 +0300 Subject: [PATCH 010/274] Change s3 auth func Signed-off-by: Dmitriy Zayakin --- .../controllers/cluster_state_controller.py | 0 src/frostfs_testlib/steps/s3/s3_helper.py | 74 +++++++++---------- 2 files changed, 37 insertions(+), 37 deletions(-) delete mode 100644 src/frostfs_testlib/controllers/cluster_state_controller.py diff --git a/src/frostfs_testlib/controllers/cluster_state_controller.py b/src/frostfs_testlib/controllers/cluster_state_controller.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index 0c6c448..d6c2095 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -8,17 +8,20 @@ from typing import Optional from dateutil.parser import parse +from frostfs_testlib.cli import FrostfsAuthmate from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus -from frostfs_testlib.shell import Shell +from frostfs_testlib.shell import CommandOptions, InteractiveInput, Shell +from frostfs_testlib.shell.interfaces 
import SshCredentials
 from frostfs_testlib.steps.cli.container import (
     search_container_by_name,
     search_nodes_with_container,
 )
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
+from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.utils.cli_utils import _run_with_passwd
 
 reporter = get_reporter()
@@ -183,48 +186,45 @@ def assert_s3_acl(acl_grants: list, permitted_users: str):
 
 @reporter.step_deco("Init S3 Credentials")
 def init_s3_credentials(
-    wallet_path: str,
+    wallet: WalletInfo,
+    shell: Shell,
     cluster: Cluster,
     s3_bearer_rules_file: str,
     policy: Optional[dict] = None,
+    s3gates: Optional[list[S3Gate]] = None,
 ):
+    gate_public_keys = []
     bucket = str(uuid.uuid4())
-
-    s3gate_node = cluster.services(S3Gate)[0]
-    gate_public_key = s3gate_node.get_wallet_public_key()
-    cmd = (
-        f"{FROSTFS_AUTHMATE_EXEC} --debug --with-log --timeout {CREDENTIALS_CREATE_TIMEOUT} "
-        f"issue-secret --wallet {wallet_path} --gate-public-key={gate_public_key} "
-        f"--peer {cluster.default_rpc_endpoint} --container-friendly-name {bucket} "
-        f"--bearer-rules {s3_bearer_rules_file}"
+    if not s3gates:
+        s3gates = [cluster.s3_gates[0]]
+    for s3gate in s3gates:
+        gate_public_keys.append(s3gate.get_wallet_public_key())
+    frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
+    issue_secret_output = frostfs_authmate_exec.secret.issue(
+        wallet=wallet.path,
+        peer=cluster.default_rpc_endpoint,
+        bearer_rules=s3_bearer_rules_file,
+        gate_public_key=gate_public_keys,
+        wallet_password=wallet.password,
+        container_policy=policy,
+        container_friendly_name=bucket,
+    ).stdout
+    aws_access_key_id = str(
+        re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
+            "aws_access_key_id"
+        )
     )
-    if policy:
-        cmd += f" --container-policy {policy}'"
-    logger.info(f"Executing command: {cmd}")
-
-    try:
-        output = _run_with_passwd(cmd)
-        logger.info(f"Command completed with output: {output}")
-
-        # output contains some debug info and then several JSON structures, so we find each
-        # JSON structure by curly brackets (naive approach, but works while JSON is not nested)
-        # and then we take JSON containing secret_access_key
-        json_blocks = re.findall(r"\{.*?\}", output, re.DOTALL)
-        for json_block in json_blocks:
-            try:
-                parsed_json_block = json.loads(json_block)
-                if "secret_access_key" in parsed_json_block:
-                    return (
-                        parsed_json_block["container_id"],
-                        parsed_json_block["access_key_id"],
-                        parsed_json_block["secret_access_key"],
-                    )
-            except json.JSONDecodeError:
-                raise AssertionError(f"Could not parse info from output\n{output}")
-        raise AssertionError(f"Could not find AWS credentials in output:\n{output}")
-
-    except Exception as exc:
-        raise RuntimeError(f"Failed to init s3 credentials because of error\n{exc}") from exc
+    aws_secret_access_key = str(
+        re.search(
+            r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output
+        ).group("aws_secret_access_key")
+    )
+    cid = str(
+        re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group(
+            "container_id"
+        )
+    )
+    return cid, aws_access_key_id, aws_secret_access_key

From 182bd6ab367cb86342d578ab5d029818cbbf8d1d Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Mon, 26 Jun 2023 16:45:34 +0300
Subject: [PATCH 011/274] Add loader and scenario runner interfaces, add
 support for local scenario

Signed-off-by: Andrey Berezin
---
 .../cli/frostfs_authmate/authmate.py
| 4 +- src/frostfs_testlib/load/__init__.py | 13 + src/frostfs_testlib/load/interfaces.py | 53 +++ src/frostfs_testlib/load/k6.py | 91 ++-- src/frostfs_testlib/load/load_config.py | 17 +- src/frostfs_testlib/load/load_metrics.py | 12 + src/frostfs_testlib/load/load_report.py | 1 + src/frostfs_testlib/load/load_steps.py | 191 --------- src/frostfs_testlib/load/loaders.py | 60 +++ src/frostfs_testlib/load/runners.py | 398 ++++++++++++++++++ .../processes/remote_process.py | 73 +++- .../reporter/allure_handler.py | 2 +- src/frostfs_testlib/resources/common.py | 2 + .../shell/command_inspectors.py | 18 +- src/frostfs_testlib/shell/interfaces.py | 5 +- src/frostfs_testlib/shell/local_shell.py | 6 +- src/frostfs_testlib/shell/ssh_shell.py | 6 +- .../controllers/background_load_controller.py | 149 ++----- .../controllers/cluster_state_controller.py | 69 ++- 19 files changed, 786 insertions(+), 384 deletions(-) create mode 100644 src/frostfs_testlib/load/__init__.py create mode 100644 src/frostfs_testlib/load/interfaces.py delete mode 100644 src/frostfs_testlib/load/load_steps.py create mode 100644 src/frostfs_testlib/load/loaders.py create mode 100644 src/frostfs_testlib/load/runners.py diff --git a/src/frostfs_testlib/cli/frostfs_authmate/authmate.py b/src/frostfs_testlib/cli/frostfs_authmate/authmate.py index ba3a3b0..7912dae 100644 --- a/src/frostfs_testlib/cli/frostfs_authmate/authmate.py +++ b/src/frostfs_testlib/cli/frostfs_authmate/authmate.py @@ -6,8 +6,8 @@ from frostfs_testlib.shell import Shell class FrostfsAuthmate: - secret: Optional[FrostfsAuthmateSecret] = None - version: Optional[FrostfsAuthmateVersion] = None + secret: FrostfsAuthmateSecret + version: FrostfsAuthmateVersion def __init__(self, shell: Shell, frostfs_authmate_exec_path: str): self.secret = FrostfsAuthmateSecret(shell, frostfs_authmate_exec_path) diff --git a/src/frostfs_testlib/load/__init__.py b/src/frostfs_testlib/load/__init__.py new file mode 100644 index 0000000..e8ed75e --- /dev/null +++ b/src/frostfs_testlib/load/__init__.py @@ -0,0 +1,13 @@ +from frostfs_testlib.load.interfaces import Loader, ScenarioRunner +from frostfs_testlib.load.load_config import ( + EndpointSelectionStrategy, + K6ProcessAllocationStrategy, + LoadParams, + LoadScenario, + LoadType, + NodesSelectionStrategy, + Preset, +) +from frostfs_testlib.load.load_report import LoadReport +from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader +from frostfs_testlib.load.runners import DefaultRunner, LocalRunner diff --git a/src/frostfs_testlib/load/interfaces.py b/src/frostfs_testlib/load/interfaces.py new file mode 100644 index 0000000..fbbc20b --- /dev/null +++ b/src/frostfs_testlib/load/interfaces.py @@ -0,0 +1,53 @@ +from abc import ABC, abstractmethod + +from frostfs_testlib.load.load_config import LoadParams +from frostfs_testlib.shell.interfaces import Shell +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo + + +class Loader(ABC): + @abstractmethod + def get_shell(self) -> Shell: + """Get shell for the loader""" + + @property + @abstractmethod + def ip(self): + """Get address of the loader""" + + +class ScenarioRunner(ABC): + @abstractmethod + def prepare( + self, + load_params: LoadParams, + nodes_under_load: list[ClusterNode], + k6_dir: str, + ): + """Preparation steps before running the load""" + + @abstractmethod + def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): + """Init K6 instances""" + + @abstractmethod + def 
start(self): + """Start K6 instances""" + + @abstractmethod + def stop(self): + """Stop K6 instances""" + + @property + @abstractmethod + def is_running(self) -> bool: + """Returns True if load is running at the moment""" + + @abstractmethod + def wait_until_finish(self): + """Wait until load is finished""" + + @abstractmethod + def get_results(self) -> dict: + """Get results from K6 run""" diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index 2fa2c00..ca3f696 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -1,10 +1,12 @@ import json import logging +import math import os from dataclasses import dataclass, fields from time import sleep from typing import Any +from frostfs_testlib.load.interfaces import Loader from frostfs_testlib.load.load_config import ( K6ProcessAllocationStrategy, LoadParams, @@ -13,7 +15,12 @@ from frostfs_testlib.load.load_config import ( ) from frostfs_testlib.processes.remote_process import RemoteProcess from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, LOAD_NODE_SSH_USER +from frostfs_testlib.resources.common import STORAGE_USER_NAME +from frostfs_testlib.resources.load_params import ( + K6_STOP_SIGNAL_TIMEOUT, + K6_TEARDOWN_PERIOD, + LOAD_NODE_SSH_USER, +) from frostfs_testlib.shell import Shell from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.test_control import wait_for_success @@ -42,7 +49,7 @@ class K6: endpoints: list[str], k6_dir: str, shell: Shell, - load_node: str, + loader: Loader, wallet: WalletInfo, ): if load_params.scenario is None: @@ -50,7 +57,7 @@ class K6: self.load_params: LoadParams = load_params self.endpoints = endpoints - self.load_node: str = load_node + self.loader: Loader = loader self.shell: Shell = shell self.wallet = wallet self.scenario: LoadScenario = load_params.scenario @@ -151,32 +158,56 @@ class K6: [f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None] ) - @reporter.step_deco("Start K6 on initiator") def start(self) -> None: - command = ( - f"{self._k6_dir}/k6 run {self._generate_env_variables()} " - f"{self._k6_dir}/scenarios/{self.scenario.value}.js" - ) - self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir) + with reporter.step( + f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}" + ): + command = ( + f"{self._k6_dir}/k6 run {self._generate_env_variables()} " + f"{self._k6_dir}/scenarios/{self.scenario.value}.js" + ) + user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None + self._k6_process = RemoteProcess.create( + command, self.shell, self.load_params.working_dir, user + ) + + def wait_until_finished(self) -> None: + with reporter.step( + f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}" + ): + if self.load_params.scenario == LoadScenario.VERIFY: + timeout = self.load_params.verify_time or 0 + else: + timeout = self.load_params.load_time or 0 + + timeout += int(K6_TEARDOWN_PERIOD) + original_timeout = timeout + + min_wait_interval = 10 + wait_interval = min_wait_interval + if self._k6_process is None: + assert "No k6 instances were executed" + while timeout > 0: + if not self._k6_process.running(): + return + logger.info(f"K6 is running. 
Waiting {wait_interval} seconds...") + sleep(wait_interval) + timeout -= min(timeout, wait_interval) + wait_interval = max( + min(timeout, int(math.log2(timeout + 1)) * 15) - min_wait_interval, + min_wait_interval, + ) - @reporter.step_deco("Wait until K6 is finished") - def wait_until_finished(self, timeout: int = 0, k6_should_be_running: bool = False) -> None: - wait_interval = 10 - if self._k6_process is None: - assert "No k6 instances were executed" - if k6_should_be_running: - assert self._k6_process.running(), "k6 should be running." - while timeout > 0: if not self._k6_process.running(): return - logger.info(f"K6 is running. Waiting {wait_interval} seconds...") - sleep(wait_interval) - timeout -= wait_interval - self.stop() - raise TimeoutError(f"Expected K6 finished in {timeout} sec.") + + self.stop() + raise TimeoutError(f"Expected K6 to finish after {original_timeout} sec.") def get_results(self) -> Any: - with reporter.step(f"K6 results from {self.load_node}"): + with reporter.step( + f"Get load results from loader {self.loader.ip} on endpoints {self.endpoints}" + ): self.__log_output() if not self.summary_json: @@ -186,20 +217,20 @@ class K6: summary_json = json.loads(summary_text) allure_filenames = { - K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.load_node}_{self.scenario.value}_summary.json", - K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.load_node}_{self.scenario.value}_{self.endpoints[0]}_summary.json", + K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.loader.ip}_{self.scenario.value}_summary.json", + K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.loader.ip}_{self.scenario.value}_{self.endpoints[0]}_summary.json", } allure_filename = allure_filenames[self.load_params.k6_process_allocation_strategy] reporter.attach(summary_text, allure_filename) return summary_json - @reporter.step_deco("Stop K6") def stop(self) -> None: - if self.is_running: - self._k6_process.stop() + with reporter.step(f"Stop load from loader {self.loader.ip} on endpoints {self.endpoints}"): + if self.is_running: + self._k6_process.stop() - self._wait_until_process_end() + self._wait_until_process_end() @property def is_running(self) -> bool: @@ -207,7 +238,7 @@ class K6: return self._k6_process.running() return False - @reporter.step_deco("Wait until process end") + @reporter.step_deco("Wait until K6 process end") @wait_for_success( K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout" ) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 4e67321..c337d7c 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -17,6 +17,7 @@ class LoadScenario(Enum): S3_CAR = "s3_car" HTTP = "http" VERIFY = "verify" + LOCAL = "local" all_load_scenarios = [ @@ -25,13 +26,19 @@ all_load_scenarios = [ LoadScenario.HTTP, LoadScenario.S3_CAR, LoadScenario.gRPC_CAR, + LoadScenario.LOCAL, ] all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY] -constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP] +constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP, LoadScenario.LOCAL] constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR] -grpc_preset_scenarios = [LoadScenario.gRPC, LoadScenario.HTTP, LoadScenario.gRPC_CAR] +grpc_preset_scenarios = [ + LoadScenario.gRPC, + LoadScenario.HTTP, + LoadScenario.gRPC_CAR, + LoadScenario.LOCAL, +] s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR] @@ -129,6 
+136,8 @@ class LoadParams: working_dir: Optional[str] = None # Preset for the k6 run preset: Optional[Preset] = None + # K6 download url + k6_url: Optional[str] = None # ------- COMMON SCENARIO PARAMS ------- # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. @@ -207,6 +216,10 @@ class LoadParams: # Amount of Verification VU. verify_clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS", True) + # ------- LOCAL SCENARIO PARAMS ------- + # Config file location (filled automatically) + config_file: Optional[str] = metadata_field([LoadScenario.LOCAL], None, "CONFIG_FILE") + def set_id(self, load_id): self.load_id = load_id self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt") diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py index 50d7b38..0b4e28e 100644 --- a/src/frostfs_testlib/load/load_metrics.py +++ b/src/frostfs_testlib/load/load_metrics.py @@ -138,6 +138,17 @@ class S3Metrics(MetricsBase): _DELETE_ERRORS = "aws_obj_delete_fails" +class LocalMetrics(MetricsBase): + _WRITE_SUCCESS = "local_obj_put_total" + _WRITE_ERRORS = "local_obj_put_fails" + + _READ_SUCCESS = "local_obj_get_total" + _READ_ERRORS = "local_obj_get_fails" + + _DELETE_SUCCESS = "local_obj_delete_total" + _DELETE_ERRORS = "local_obj_delete_fails" + + class VerifyMetrics(MetricsBase): _WRITE_SUCCESS = "N/A" _WRITE_ERRORS = "N/A" @@ -157,6 +168,7 @@ def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> Metr LoadScenario.S3: S3Metrics, LoadScenario.S3_CAR: S3Metrics, LoadScenario.VERIFY: VerifyMetrics, + LoadScenario.LOCAL: LocalMetrics, } return class_map[load_type](summary) diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index 5f22515..7f912e4 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -103,6 +103,7 @@ class LoadReport: LoadScenario.HTTP: "closed model", LoadScenario.gRPC_CAR: "open model", LoadScenario.S3_CAR: "open model", + LoadScenario.LOCAL: "local fill", } return model_map[self.load_params.scenario] diff --git a/src/frostfs_testlib/load/load_steps.py b/src/frostfs_testlib/load/load_steps.py deleted file mode 100644 index b55ff22..0000000 --- a/src/frostfs_testlib/load/load_steps.py +++ /dev/null @@ -1,191 +0,0 @@ -import copy -import itertools -import math -import re -from dataclasses import fields - -from frostfs_testlib.cli import FrostfsAuthmate -from frostfs_testlib.load.k6 import K6 -from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams -from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC -from frostfs_testlib.resources.load_params import ( - BACKGROUND_LOAD_VUS_COUNT_DIVISOR, - LOAD_NODE_SSH_USER, -) -from frostfs_testlib.shell import CommandOptions, SSHShell -from frostfs_testlib.shell.interfaces import InteractiveInput, SshCredentials -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo - -reporter = get_reporter() - -STOPPED_HOSTS = [] - - -@reporter.step_deco("Init s3 client on load nodes") -def init_s3_client( - load_nodes: list[str], - load_params: LoadParams, - k6_directory: str, - ssh_credentials: SshCredentials, - nodes_under_load: list[ClusterNode], - wallet: WalletInfo, -): - 
storage_node = nodes_under_load[0].service(StorageNode) - s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in nodes_under_load] - grpc_peer = storage_node.get_rpc_endpoint() - - for load_node in load_nodes: - ssh_client = _get_shell(ssh_credentials, load_node) - frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(ssh_client, FROSTFS_AUTHMATE_EXEC) - issue_secret_output = frostfs_authmate_exec.secret.issue( - wallet=wallet.path, - peer=grpc_peer, - bearer_rules=f"{k6_directory}/scenarios/files/rules.json", - gate_public_key=s3_public_keys, - container_placement_policy=load_params.preset.container_placement_policy, - container_policy=f"{k6_directory}/scenarios/files/policy.json", - wallet_password=wallet.password, - ).stdout - aws_access_key_id = str( - re.search(r"access_key_id.*:\s.(?P\w*)", issue_secret_output).group( - "aws_access_key_id" - ) - ) - aws_secret_access_key = str( - re.search( - r"secret_access_key.*:\s.(?P\w*)", issue_secret_output - ).group("aws_secret_access_key") - ) - # prompt_pattern doesn't work at the moment - configure_input = [ - InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id), - InteractiveInput( - prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key - ), - InteractiveInput(prompt_pattern=r".*", input=""), - InteractiveInput(prompt_pattern=r".*", input=""), - ] - ssh_client.exec("aws configure", CommandOptions(interactive_inputs=configure_input)) - - -@reporter.step_deco("Prepare K6 instances and objects") -def prepare_k6_instances( - load_nodes: list[str], - ssh_credentials: SshCredentials, - k6_dir: str, - load_params: LoadParams, - endpoints: list[str], - loaders_wallet: WalletInfo, -) -> list[K6]: - k6_load_objects: list[K6] = [] - nodes = itertools.cycle(load_nodes) - - k6_distribution_count = { - K6ProcessAllocationStrategy.PER_LOAD_NODE: len(load_nodes), - K6ProcessAllocationStrategy.PER_ENDPOINT: len(endpoints), - } - endpoints_generators = { - K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]), - K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle( - [[endpoint] for endpoint in endpoints] - ), - } - k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy] - endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy] - - distributed_load_params_list = _get_distributed_load_params_list( - load_params, k6_processes_count - ) - - for distributed_load_params in distributed_load_params_list: - load_node = next(nodes) - shell = _get_shell(ssh_credentials, load_node) - # Make working_dir directory - shell.exec(f"sudo mkdir -p {distributed_load_params.working_dir}") - shell.exec(f"sudo chown {LOAD_NODE_SSH_USER} {distributed_load_params.working_dir}") - - k6_load_object = K6( - distributed_load_params, - next(endpoints_gen), - k6_dir, - shell, - load_node, - loaders_wallet, - ) - k6_load_objects.append(k6_load_object) - if load_params.preset: - k6_load_object.preset() - - return k6_load_objects - - -def _get_shell(ssh_credentials: SshCredentials, load_node: str) -> SSHShell: - ssh_client = SSHShell( - host=load_node, - login=ssh_credentials.ssh_login, - password=ssh_credentials.ssh_password, - private_key_path=ssh_credentials.ssh_key_path, - private_key_passphrase=ssh_credentials.ssh_key_passphrase, - ) - - return ssh_client - - -def _get_distributed_load_params_list( - original_load_params: LoadParams, workers_count: int -) -> list[LoadParams]: - divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR) - 
distributed_load_params: list[LoadParams] = [] - - for i in range(workers_count): - load_params = copy.deepcopy(original_load_params) - # Append #i here in case if multiple k6 processes goes into same load node - load_params.set_id(f"{load_params.load_id}_{i}") - distributed_load_params.append(load_params) - - load_fields = fields(original_load_params) - - for field in load_fields: - if ( - field.metadata - and original_load_params.scenario in field.metadata["applicable_scenarios"] - and field.metadata["distributed"] - and getattr(original_load_params, field.name) is not None - ): - original_value = getattr(original_load_params, field.name) - distribution = _get_distribution(math.ceil(original_value / divisor), workers_count) - for i in range(workers_count): - setattr(distributed_load_params[i], field.name, distribution[i]) - - return distributed_load_params - - -def _get_distribution(clients_count: int, workers_count: int) -> list[int]: - """ - This function will distribute evenly as possible X clients to Y workers. - For example if we have 150 readers (clients) and we want to spread it over 4 load nodes (workers) - this will return [38, 38, 37, 37]. - - Args: - clients_count: amount of things needs to be distributed. - workers_count: amount of workers. - - Returns: - list of distribution. - """ - if workers_count < 1: - raise Exception("Workers cannot be less then 1") - - # Amount of guaranteed payload on one worker - clients_per_worker = clients_count // workers_count - # Remainder of clients left to be distributed - remainder = clients_count - clients_per_worker * workers_count - - distribution = [ - clients_per_worker + 1 if i < remainder else clients_per_worker - for i in range(workers_count) - ] - return distribution diff --git a/src/frostfs_testlib/load/loaders.py b/src/frostfs_testlib/load/loaders.py new file mode 100644 index 0000000..9e92155 --- /dev/null +++ b/src/frostfs_testlib/load/loaders.py @@ -0,0 +1,60 @@ +from frostfs_testlib.load.interfaces import Loader +from frostfs_testlib.resources.load_params import ( + LOAD_NODE_SSH_PASSWORD, + LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, + LOAD_NODE_SSH_PRIVATE_KEY_PATH, + LOAD_NODE_SSH_USER, +) +from frostfs_testlib.shell.interfaces import Shell, SshCredentials +from frostfs_testlib.shell.ssh_shell import SSHShell +from frostfs_testlib.storage.cluster import ClusterNode + + +class RemoteLoader(Loader): + def __init__(self, ssh_credentials: SshCredentials, ip: str) -> None: + self.ssh_credentials = ssh_credentials + self._ip = ip + + @property + def ip(self): + return self._ip + + def get_shell(self) -> Shell: + ssh_client = SSHShell( + host=self.ip, + login=self.ssh_credentials.ssh_login, + password=self.ssh_credentials.ssh_password, + private_key_path=self.ssh_credentials.ssh_key_path, + private_key_passphrase=self.ssh_credentials.ssh_key_passphrase, + ) + + return ssh_client + + @classmethod + def from_ip_list(cls, ip_list: list[str]) -> list[Loader]: + loaders: list[Loader] = [] + ssh_credentials = SshCredentials( + LOAD_NODE_SSH_USER, + LOAD_NODE_SSH_PASSWORD, + LOAD_NODE_SSH_PRIVATE_KEY_PATH, + LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, + ) + + for ip in ip_list: + loaders.append(RemoteLoader(ssh_credentials, ip)) + + return loaders + + +class NodeLoader(Loader): + """When ClusterNode is the loader for itself (for Local scenario only).""" + + def __init__(self, cluster_node: ClusterNode) -> None: + self.cluster_node = cluster_node + + def get_shell(self) -> Shell: + return self.cluster_node.host.get_shell() + + @property + def 
ip(self):
+        return self.cluster_node.host_ip
diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py
new file mode 100644
index 0000000..6f9d046
--- /dev/null
+++ b/src/frostfs_testlib/load/runners.py
@@ -0,0 +1,398 @@
+import copy
+import itertools
+import math
+import re
+import time
+from concurrent.futures import ThreadPoolExecutor
+from dataclasses import fields
+from typing import Optional
+
+import yaml
+
+from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate
+from frostfs_testlib.load.interfaces import Loader, ScenarioRunner
+from frostfs_testlib.load.k6 import K6
+from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType
+from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader
+from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.resources import optionals
+from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
+from frostfs_testlib.resources.common import STORAGE_USER_NAME
+from frostfs_testlib.resources.load_params import (
+    BACKGROUND_LOAD_VUS_COUNT_DIVISOR,
+    LOAD_NODE_SSH_USER,
+    LOAD_NODES,
+)
+from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput
+from frostfs_testlib.storage.cluster import ClusterNode
+from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
+from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
+from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.testing.test_control import run_optionally
+from frostfs_testlib.utils import datetime_utils
+from frostfs_testlib.utils.file_keeper import FileKeeper
+
+reporter = get_reporter()
+
+
+class DefaultRunner(ScenarioRunner):
+    k6_instances: list[K6]
+    loaders: list[Loader]
+    loaders_wallet: WalletInfo
+
+    def __init__(
+        self,
+        loaders_wallet: WalletInfo,
+        load_ip_list: Optional[list[str]] = None,
+    ) -> None:
+        if load_ip_list is None:
+            load_ip_list = LOAD_NODES
+        self.loaders = RemoteLoader.from_ip_list(load_ip_list)
+        self.loaders_wallet = loaders_wallet
+
+    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
+    @reporter.step_deco("Prepare load instances")
+    def prepare(
+        self,
+        load_params: LoadParams,
+        nodes_under_load: list[ClusterNode],
+        k6_dir: str,
+    ):
+        if load_params.load_type != LoadType.S3:
+            return
+
+        with reporter.step("Init s3 client on loaders"):
+            storage_node = nodes_under_load[0].service(StorageNode)
+            s3_public_keys = [
+                node.service(S3Gate).get_wallet_public_key() for node in nodes_under_load
+            ]
+            grpc_peer = storage_node.get_rpc_endpoint()
+
+            for loader in self.loaders:
+                with reporter.step(f"Init s3 client on {loader.ip}"):
+                    shell = loader.get_shell()
+                    frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(
+                        shell, FROSTFS_AUTHMATE_EXEC
+                    )
+                    issue_secret_output = frostfs_authmate_exec.secret.issue(
+                        wallet=self.loaders_wallet.path,
+                        peer=grpc_peer,
+                        bearer_rules=f"{k6_dir}/scenarios/files/rules.json",
+                        gate_public_key=s3_public_keys,
+                        container_placement_policy=load_params.preset.container_placement_policy,
+                        container_policy=f"{k6_dir}/scenarios/files/policy.json",
+                        wallet_password=self.loaders_wallet.password,
+                    ).stdout
+                    aws_access_key_id = str(
+                        re.search(
+                            r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output
+                        ).group("aws_access_key_id")
+                    )
+                    aws_secret_access_key = str(
+                        re.search(
+                            r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)",
+                            issue_secret_output,
+                        ).group("aws_secret_access_key")
+                    )
+
+                    configure_input
= [ + InteractiveInput( + prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id + ), + InteractiveInput( + prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key + ), + InteractiveInput(prompt_pattern=r".*", input=""), + InteractiveInput(prompt_pattern=r".*", input=""), + ] + shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input)) + + def wait_until_finish(self): + for k6_instance in self.k6_instances: + k6_instance.wait_until_finished() + + def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): + self.k6_instances = [] + cycled_loaders = itertools.cycle(self.loaders) + + k6_distribution_count = { + K6ProcessAllocationStrategy.PER_LOAD_NODE: len(self.loaders), + K6ProcessAllocationStrategy.PER_ENDPOINT: len(endpoints), + } + endpoints_generators = { + K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]), + K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle( + [[endpoint] for endpoint in endpoints] + ), + } + k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy] + endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy] + + distributed_load_params_list = self._get_distributed_load_params_list( + load_params, k6_processes_count + ) + + for distributed_load_params in distributed_load_params_list: + loader = next(cycled_loaders) + shell = loader.get_shell() + with reporter.step( + f"Init K6 instances on {loader.ip} for load id {distributed_load_params.load_id}" + ): + with reporter.step(f"Make working directory"): + shell.exec(f"sudo mkdir -p {distributed_load_params.working_dir}") + shell.exec( + f"sudo chown {LOAD_NODE_SSH_USER} {distributed_load_params.working_dir}" + ) + + k6_instance = K6( + distributed_load_params, + next(endpoints_gen), + k6_dir, + shell, + loader, + self.loaders_wallet, + ) + self.k6_instances.append(k6_instance) + if load_params.preset: + k6_instance.preset() + + def _get_distributed_load_params_list( + self, original_load_params: LoadParams, workers_count: int + ) -> list[LoadParams]: + divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR) + distributed_load_params: list[LoadParams] = [] + + for i in range(workers_count): + load_params = copy.deepcopy(original_load_params) + # Append #i here in case if multiple k6 processes goes into same load node + load_params.set_id(f"{load_params.load_id}_{i}") + distributed_load_params.append(load_params) + + load_fields = fields(original_load_params) + + for field in load_fields: + if ( + field.metadata + and original_load_params.scenario in field.metadata["applicable_scenarios"] + and field.metadata["distributed"] + and getattr(original_load_params, field.name) is not None + ): + original_value = getattr(original_load_params, field.name) + distribution = self._get_distribution( + math.ceil(original_value / divisor), workers_count + ) + for i in range(workers_count): + setattr(distributed_load_params[i], field.name, distribution[i]) + + return distributed_load_params + + def _get_distribution(self, clients_count: int, workers_count: int) -> list[int]: + """ + This function will distribute evenly as possible X clients to Y workers. + For example if we have 150 readers (clients) and we want to spread it over 4 load nodes (workers) + this will return [38, 38, 37, 37]. + + Args: + clients_count: amount of things needs to be distributed. + workers_count: amount of workers. + + Returns: + list of distribution. 
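To make the distribution rule concrete, here is a minimal standalone sketch of the same arithmetic (names are illustrative; the helper's actual body follows in the hunk below):

    # Minimal re-statement of the even-distribution rule described above.
    def distribute(clients_count: int, workers_count: int) -> list[int]:
        per_worker, remainder = divmod(clients_count, workers_count)
        # The first `remainder` workers take one extra client each.
        return [per_worker + 1 if i < remainder else per_worker for i in range(workers_count)]

    assert distribute(150, 4) == [38, 38, 37, 37]
    assert sum(distribute(7, 3)) == 7  # no client is lost to rounding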
+ """ + if workers_count < 1: + raise Exception("Workers cannot be less then 1") + + # Amount of guaranteed payload on one worker + clients_per_worker = clients_count // workers_count + # Remainder of clients left to be distributed + remainder = clients_count - clients_per_worker * workers_count + + distribution = [ + clients_per_worker + 1 if i < remainder else clients_per_worker + for i in range(workers_count) + ] + return distribution + + def start(self): + load_params = self.k6_instances[0].load_params + + with ThreadPoolExecutor(max_workers=len(self.k6_instances)) as executor: + futures = [executor.submit(k6.start) for k6 in self.k6_instances] + + # Check for exceptions + exceptions = [future.exception() for future in futures if future.exception()] + if exceptions: + raise RuntimeError( + f"The following exceptions occured during start of k6: {exceptions}" + ) + + wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5 + with reporter.step( + f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on" + ): + time.sleep(wait_after_start_time) + + def stop(self): + for k6_instance in self.k6_instances: + k6_instance.stop() + + def get_results(self) -> dict: + results = {} + for k6_instance in self.k6_instances: + if k6_instance.load_params.k6_process_allocation_strategy is None: + raise RuntimeError("k6_process_allocation_strategy should not be none") + + result = k6_instance.get_results() + keys_map = { + K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.loader.ip, + K6ProcessAllocationStrategy.PER_ENDPOINT: k6_instance.endpoints[0], + } + key = keys_map[k6_instance.load_params.k6_process_allocation_strategy] + results[key] = result + + return results + + @property + def is_running(self): + for k6_instance in self.k6_instances: + if not k6_instance.is_running: + return False + + return True + + +class LocalRunner(ScenarioRunner): + k6_instances: list[K6] + loaders: list[Loader] + cluster_state_controller: ClusterStateController + file_keeper: FileKeeper + wallet: WalletInfo + + def __init__( + self, + cluster_state_controller: ClusterStateController, + file_keeper: FileKeeper, + nodes_under_load: list[ClusterNode], + ) -> None: + self.cluster_state_controller = cluster_state_controller + self.file_keeper = file_keeper + self.loaders = [NodeLoader(node) for node in nodes_under_load] + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step_deco("Prepare load instances") + def prepare( + self, + load_params: LoadParams, + nodes_under_load: list[ClusterNode], + k6_dir: str, + ): + @reporter.step_deco("Prepare node {cluster_node}") + def prepare_node(cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + + with reporter.step("Allow storage user to login into system"): + shell.exec(f"sudo chsh -s /bin/bash {STORAGE_USER_NAME}") + + with reporter.step("Update limits.conf"): + limits_path = "/etc/security/limits.conf" + self.file_keeper.add(cluster_node.storage_node, limits_path) + content = f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n" + shell.exec(f"echo '{content}' | sudo tee {limits_path}") + + with reporter.step("Download K6"): + shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}") + shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}") + shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz -C {k6_dir}") + shell.exec(f"sudo chmod -R 777 {k6_dir}") + + with reporter.step("Create empty_passwd"): + self.wallet = WalletInfo( + 
f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml" + ) + content = yaml.dump({"password": ""}) + shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}') + shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}") + + with ThreadPoolExecutor(max_workers=len(nodes_under_load)) as executor: + result = executor.map(prepare_node, nodes_under_load) + + # Check for exceptions + for _ in result: + pass + + def wait_until_finish(self): + for k6_instance in self.k6_instances: + k6_instance.wait_until_finished() + + def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): + self.k6_instances = [] + for loader in self.loaders: + shell = loader.get_shell() + with reporter.step(f"Init K6 instances on {loader.ip}"): + with reporter.step(f"Make working directory"): + shell.exec(f"sudo mkdir -p {load_params.working_dir}") + # If we chmod /home/ folder we can no longer ssh to the node + # !! IMPORTANT !! + if ( + load_params.working_dir + and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}" + and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/" + ): + shell.exec(f"sudo chmod -R 777 {load_params.working_dir}") + + k6_instance = K6( + load_params, + ["localhost:8080"], + k6_dir, + shell, + loader, + self.wallet, + ) + self.k6_instances.append(k6_instance) + if load_params.preset: + k6_instance.preset() + + def start(self): + load_params = self.k6_instances[0].load_params + + self.cluster_state_controller.stop_all_s3_gates() + self.cluster_state_controller.stop_all_storage_services() + + with ThreadPoolExecutor(max_workers=len(self.k6_instances)) as executor: + futures = [executor.submit(k6.start) for k6 in self.k6_instances] + + # Check for exceptions + exceptions = [future.exception() for future in futures if future.exception()] + if exceptions: + raise RuntimeError( + f"The following exceptions occured during start of k6: {exceptions}" + ) + + wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5 + with reporter.step( + f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on" + ): + time.sleep(wait_after_start_time) + + def stop(self): + for k6_instance in self.k6_instances: + k6_instance.stop() + + self.cluster_state_controller.start_stopped_storage_services() + self.cluster_state_controller.start_stopped_s3_gates() + + def get_results(self) -> dict: + results = {} + for k6_instance in self.k6_instances: + result = k6_instance.get_results() + results[k6_instance.loader.ip] = result + + return results + + @property + def is_running(self): + for k6_instance in self.k6_instances: + if not k6_instance.is_running: + return False + + return True diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py index 7f49000..d92d77a 100644 --- a/src/frostfs_testlib/processes/remote_process.py +++ b/src/frostfs_testlib/processes/remote_process.py @@ -10,13 +10,16 @@ from tenacity.wait import wait_fixed from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import Shell -from frostfs_testlib.shell.interfaces import CommandOptions +from frostfs_testlib.shell.command_inspectors import SuInspector +from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions reporter = get_reporter() class RemoteProcess: - def __init__(self, cmd: str, process_dir: str, shell: Shell): + def __init__( + self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector] + ): 
        self.process_dir = process_dir
         self.cmd = cmd
         self.stdout_last_line_number = 0
@@ -26,10 +29,13 @@ class RemoteProcess:
         self.saved_stdout: Optional[str] = None
         self.saved_stderr: Optional[str] = None
         self.shell = shell
+        self.cmd_inspectors: list[CommandInspector] = [cmd_inspector] if cmd_inspector else []
 
     @classmethod
     @reporter.step_deco("Create remote process")
-    def create(cls, command: str, shell: Shell, working_dir: str = "/tmp") -> RemoteProcess:
+    def create(
+        cls, command: str, shell: Shell, working_dir: str = "/tmp", user: Optional[str] = None
+    ) -> RemoteProcess:
         """
         Create a process on a remote host.
 
@@ -39,6 +45,7 @@ class RemoteProcess:
             rc: contains script return code
             stderr: contains script errors
             stdout: contains script output
+            user: user on whose behalf the command will be executed
 
         Args:
             shell: Shell instance
@@ -48,8 +55,12 @@ class RemoteProcess:
         Returns:
             RemoteProcess instance for further examination
         """
+        cmd_inspector = SuInspector(user) if user else None
         remote_process = cls(
-            cmd=command, process_dir=os.path.join(working_dir, f"proc_{uuid.uuid4()}"), shell=shell
+            cmd=command,
+            process_dir=os.path.join(working_dir, f"proc_{uuid.uuid4()}"),
+            shell=shell,
+            cmd_inspector=cmd_inspector,
         )
         remote_process._create_process_dir()
         remote_process._generate_command_script(command)
@@ -73,7 +84,8 @@ class RemoteProcess:
             cur_stdout = self.saved_stdout
         else:
             terminal = self.shell.exec(
-                f"cat {self.process_dir}/stdout", options=CommandOptions(no_log=True)
+                f"cat {self.process_dir}/stdout",
+                options=CommandOptions(no_log=True, extra_inspectors=self.cmd_inspectors),
             )
             if self.proc_rc is not None:
                 self.saved_stdout = terminal.stdout
@@ -104,7 +116,8 @@ class RemoteProcess:
             cur_stderr = self.saved_stderr
         else:
             terminal = self.shell.exec(
-                f"cat {self.process_dir}/stderr",
+                f"cat {self.process_dir}/stderr",
+                options=CommandOptions(no_log=True, extra_inspectors=self.cmd_inspectors),
             )
             if self.proc_rc is not None:
                 self.saved_stderr = terminal.stdout
@@ -123,7 +136,10 @@ class RemoteProcess:
         if self.proc_rc is not None:
             return self.proc_rc
 
-        terminal = self.shell.exec(f"cat {self.process_dir}/rc", CommandOptions(check=False))
+        terminal = self.shell.exec(
+            f"cat {self.process_dir}/rc",
+            CommandOptions(check=False, extra_inspectors=self.cmd_inspectors, no_log=True),
+        )
         if "No such file or directory" in terminal.stderr:
             return None
         elif terminal.stderr or terminal.return_code != 0:
@@ -138,7 +154,10 @@ class RemoteProcess:
 
     @reporter.step_deco("Send signal to process")
     def send_signal(self, signal: int) -> None:
-        kill_res = self.shell.exec(f"kill -{signal} {self.pid}", CommandOptions(check=False))
+        kill_res = self.shell.exec(
+            f"kill -{signal} {self.pid}",
+            CommandOptions(check=False, extra_inspectors=self.cmd_inspectors),
+        )
         if "No such process" in kill_res.stderr:
             return
         if kill_res.return_code:
@@ -158,27 +177,38 @@ class RemoteProcess:
     def clear(self) -> None:
         if self.process_dir == "/":
             raise AssertionError(f"Invalid path to delete: {self.process_dir}")
-        self.shell.exec(f"rm -rf {self.process_dir}")
+        self.shell.exec(
+            f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)
+        )
 
     @reporter.step_deco("Start remote process")
     def _start_process(self) -> None:
         self.shell.exec(
             f"nohup {self.process_dir}/command.sh </dev/null >{self.process_dir}/stdout "
-            f"2>{self.process_dir}/stderr &"
+            f"2>{self.process_dir}/stderr &",
+            CommandOptions(extra_inspectors=self.cmd_inspectors),
         )
 
     @reporter.step_deco("Create process
directory") def _create_process_dir(self) -> None: - self.shell.exec(f"mkdir {self.process_dir}") - self.shell.exec(f"chmod 777 {self.process_dir}") - terminal = self.shell.exec(f"realpath {self.process_dir}") + self.shell.exec( + f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors) + ) + self.shell.exec( + f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors) + ) + terminal = self.shell.exec( + f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors) + ) self.process_dir = terminal.stdout.strip() @reporter.step_deco("Get pid") @retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True) def _get_pid(self) -> str: - terminal = self.shell.exec(f"cat {self.process_dir}/pid") + terminal = self.shell.exec( + f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors) + ) assert terminal.stdout, f"invalid pid: {terminal.stdout}" return terminal.stdout.strip() @@ -196,6 +226,15 @@ class RemoteProcess: f"echo $? > {self.process_dir}/rc" ) - self.shell.exec(f'echo "{script}" > {self.process_dir}/command.sh') - self.shell.exec(f"cat {self.process_dir}/command.sh") - self.shell.exec(f"chmod +x {self.process_dir}/command.sh") + self.shell.exec( + f'echo "{script}" > {self.process_dir}/command.sh', + CommandOptions(extra_inspectors=self.cmd_inspectors), + ) + self.shell.exec( + f"cat {self.process_dir}/command.sh", + CommandOptions(extra_inspectors=self.cmd_inspectors), + ) + self.shell.exec( + f"chmod +x {self.process_dir}/command.sh", + CommandOptions(extra_inspectors=self.cmd_inspectors), + ) diff --git a/src/frostfs_testlib/reporter/allure_handler.py b/src/frostfs_testlib/reporter/allure_handler.py index 8e00b26..fef815d 100644 --- a/src/frostfs_testlib/reporter/allure_handler.py +++ b/src/frostfs_testlib/reporter/allure_handler.py @@ -13,7 +13,7 @@ class AllureHandler(ReporterHandler): """Handler that stores test artifacts in Allure report.""" def step(self, name: str) -> AbstractContextManager: - name = shorten(name, width=70, placeholder="...") + name = shorten(name, width=140, placeholder="...") return allure.step(name) def step_decorator(self, name: str) -> Callable: diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index dfbb3a1..131bf8a 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -10,6 +10,8 @@ COMPLEX_OBJECT_TAIL_SIZE = os.getenv("COMPLEX_OBJECT_TAIL_SIZE", "1000") SERVICE_MAX_STARTUP_TIME = os.getenv("SERVICE_MAX_STARTUP_TIME", "5m") +STORAGE_USER_NAME = "frostfs-storage" + MORPH_TIMEOUT = os.getenv("MORPH_BLOCK_TIME", "8s") MORPH_BLOCK_TIME = os.getenv("MORPH_BLOCK_TIME", "8s") FROSTFS_CONTRACT_CACHE_TIMEOUT = os.getenv("FROSTFS_CONTRACT_CACHE_TIMEOUT", "30s") diff --git a/src/frostfs_testlib/shell/command_inspectors.py b/src/frostfs_testlib/shell/command_inspectors.py index 8486f43..8fe2f34 100644 --- a/src/frostfs_testlib/shell/command_inspectors.py +++ b/src/frostfs_testlib/shell/command_inspectors.py @@ -7,7 +7,23 @@ class SudoInspector(CommandInspector): If command is already prepended with sudo, then has no effect. 
""" - def inspect(self, command: str) -> str: + def inspect(self, original_command: str, command: str) -> str: if not command.startswith("sudo"): return f"sudo {command}" return command + + +class SuInspector(CommandInspector): + """Allows to run command as another user via sudo su call + + If command is already prepended with sudo su, then has no effect. + """ + + def __init__(self, user: str) -> None: + self.user = user + + def inspect(self, original_command: str, command: str) -> str: + if not original_command.startswith("sudo su"): + cmd = original_command.replace('"', '\\"').replace("\$", "\\\\\\$") + return f'sudo su - {self.user} -c "{cmd}"' + return original_command diff --git a/src/frostfs_testlib/shell/interfaces.py b/src/frostfs_testlib/shell/interfaces.py index 219bc7c..a8d3325 100644 --- a/src/frostfs_testlib/shell/interfaces.py +++ b/src/frostfs_testlib/shell/interfaces.py @@ -22,11 +22,12 @@ class CommandInspector(ABC): """Interface of inspector that processes command text before execution.""" @abstractmethod - def inspect(self, command: str) -> str: + def inspect(self, original_command: str, command: str) -> str: """Transforms command text and returns modified command. Args: command: Command to transform with this inspector. + original_command: Untransformed command to transform with this inspector. Depending on type of the inspector it might be required to modify original command Returns: Transformed command text. @@ -47,6 +48,7 @@ class CommandOptions: check: Controls whether to check return code of the command. Set to False to ignore non-zero return codes. no_log: Do not print output to logger if True. + extra_inspectors: Exctra command inspectors to process command """ interactive_inputs: Optional[list[InteractiveInput]] = None @@ -54,6 +56,7 @@ class CommandOptions: timeout: Optional[int] = None check: bool = True no_log: bool = False + extra_inspectors: Optional[list[CommandInspector]] = None def __post_init__(self): if self.timeout is None: diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index 12f450a..56d19b2 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -24,8 +24,10 @@ class LocalShell(Shell): # If no options were provided, use default options options = options or CommandOptions() - for inspector in self.command_inspectors: - command = inspector.inspect(command) + original_command = command + extra_inspectors = options.extra_inspectors if options.extra_inspectors else [] + for inspector in [*self.command_inspectors, *extra_inspectors]: + command = inspector.inspect(original_command, command) logger.info(f"Executing command: {command}") if options.interactive_inputs: diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index 6ef3dfb..5771274 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -126,8 +126,10 @@ class SSHShell(Shell): def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult: options = options or CommandOptions() - for inspector in self.command_inspectors: - command = inspector.inspect(command) + original_command = command + extra_inspectors = options.extra_inspectors if options.extra_inspectors else [] + for inspector in [*self.command_inspectors, *extra_inspectors]: + command = inspector.inspect(original_command, command) if options.interactive_inputs: result = self._exec_interactive(command, options) diff --git 
a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index a2336be..6cedd0f 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -1,50 +1,37 @@ import copy -import time +from typing import Optional import frostfs_testlib.resources.optionals as optionals -from frostfs_testlib.load.k6 import K6 +from frostfs_testlib.load.interfaces import ScenarioRunner from frostfs_testlib.load.load_config import ( EndpointSelectionStrategy, - K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType, ) from frostfs_testlib.load.load_report import LoadReport -from frostfs_testlib.load.load_steps import init_s3_client, prepare_k6_instances from frostfs_testlib.load.load_verifiers import LoadVerifier from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.resources.load_params import ( - K6_TEARDOWN_PERIOD, - LOAD_NODE_SSH_PASSWORD, - LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, - LOAD_NODE_SSH_PRIVATE_KEY_PATH, - LOAD_NODE_SSH_USER, - LOAD_NODES, -) -from frostfs_testlib.shell.interfaces import SshCredentials from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.test_control import run_optionally -from frostfs_testlib.utils import datetime_utils reporter = get_reporter() class BackgroundLoadController: - k6_instances: list[K6] k6_dir: str load_params: LoadParams original_load_params: LoadParams - load_nodes: list[str] verification_params: LoadParams nodes_under_load: list[ClusterNode] load_counter: int - ssh_credentials: SshCredentials loaders_wallet: WalletInfo load_summaries: dict endpoints: list[str] + runner: ScenarioRunner + started: bool def __init__( self, @@ -52,15 +39,16 @@ class BackgroundLoadController: load_params: LoadParams, loaders_wallet: WalletInfo, nodes_under_load: list[ClusterNode], + runner: ScenarioRunner, ) -> None: self.k6_dir = k6_dir self.original_load_params = load_params self.load_params = copy.deepcopy(self.original_load_params) self.nodes_under_load = nodes_under_load self.load_counter = 1 - self.load_nodes = LOAD_NODES self.loaders_wallet = loaders_wallet - + self.runner = runner + self.started = False if load_params.endpoint_selection_strategy is None: raise RuntimeError("endpoint_selection_strategy should not be None") @@ -68,13 +56,6 @@ class BackgroundLoadController: load_params.load_type, load_params.endpoint_selection_strategy ) - self.ssh_credentials = SshCredentials( - LOAD_NODE_SSH_USER, - LOAD_NODE_SSH_PASSWORD, - LOAD_NODE_SSH_PRIVATE_KEY_PATH, - LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, - ) - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, []) def _get_endpoints( self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy @@ -116,69 +97,28 @@ class BackgroundLoadController: return all_endpoints[load_type][endpoint_selection_strategy] @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Prepare background load instances") + @reporter.step_deco("Prepare load instances") def prepare(self): - if self.load_params.load_type == LoadType.S3: - init_s3_client( - self.load_nodes, - self.load_params, - self.k6_dir, - self.ssh_credentials, - self.nodes_under_load, - self.loaders_wallet, - ) - - self._prepare(self.load_params) - - 
def _prepare(self, load_params: LoadParams): - self.k6_instances = prepare_k6_instances( - load_nodes=LOAD_NODES, - ssh_credentials=self.ssh_credentials, - k6_dir=self.k6_dir, - load_params=load_params, - endpoints=self.endpoints, - loaders_wallet=self.loaders_wallet, - ) + self.runner.prepare(self.load_params, self.nodes_under_load, self.k6_dir) + self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Start background load") def start(self): - if self.load_params.preset is None: - raise RuntimeError("Preset should not be none at the moment of start") - - with reporter.step( - f"Start background load on nodes {self.nodes_under_load}: " - f"writers = {self.load_params.writers}, " - f"obj_size = {self.load_params.object_size}, " - f"load_time = {self.load_params.load_time}, " - f"prepare_json = {self.load_params.preset.pregen_json}, " - f"endpoints = {self.endpoints}" - ): - for k6_load_instance in self.k6_instances: - k6_load_instance.start() - - wait_after_start_time = datetime_utils.parse_time(self.load_params.setup_timeout) + 5 - with reporter.step( - f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on" - ): - time.sleep(wait_after_start_time) + with reporter.step(f"Start load on nodes {self.nodes_under_load}"): + self.runner.start() + self.started = True @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Stop background load") + @reporter.step_deco("Stop load") def stop(self): - for k6_load_instance in self.k6_instances: - k6_load_instance.stop() + self.runner.stop() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, True) - def is_running(self): - for k6_load_instance in self.k6_instances: - if not k6_load_instance.is_running: - return False - - return True + def is_running(self) -> bool: + return self.runner.is_running @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Reset background load") + @reporter.step_deco("Reset load") def _reset_for_consequent_load(self): """This method is required if we want to run multiple loads during test run. 
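For example, a test that runs two consecutive loads leans on this reset so their summaries are reported under distinct ids (a hedged sketch; construction of the controller is omitted):

    controller.startup()              # first load: prepare + start via the injected runner
    controller.teardown(load_report)  # stop and collect load_summaries
    controller.verify()               # verification finishes by resetting for the next load
    controller.startup()              # second load now carries the "_2" load_id suffix
    controller.teardown(load_report)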
Raise load counter by 1 and append it to load_id @@ -188,25 +128,25 @@ class BackgroundLoadController: self.load_params.set_id(f"{self.load_params.load_id}_{self.load_counter}") @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Startup background load") + @reporter.step_deco("Startup load") def startup(self): self.prepare() self.start() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Stop and get results of background load") - def teardown(self, load_report: LoadReport = None): - if not self.k6_instances: + @reporter.step_deco("Stop and get results of load") + def teardown(self, load_report: Optional[LoadReport] = None): + if not self.started: return self.stop() - self.load_summaries = self.get_results() - self.k6_instances = [] + self.load_summaries = self._get_results() + self.started = False if load_report: load_report.add_summaries(self.load_summaries) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Verify results of background load") + @reporter.step_deco("Verify results of load") def verify(self): try: if self.load_params.verify: @@ -220,9 +160,10 @@ class BackgroundLoadController: working_dir=self.load_params.working_dir, endpoint_selection_strategy=self.load_params.endpoint_selection_strategy, k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy, + setup_timeout="1s", ) self._run_verify_scenario() - verification_summaries = self.get_results() + verification_summaries = self._get_results() self.verify_summaries(self.load_summaries, verification_summaries) finally: self._reset_for_consequent_load() @@ -239,38 +180,20 @@ class BackgroundLoadController: @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) def wait_until_finish(self): - if self.load_params.load_time is None: - raise RuntimeError("LoadTime should not be none") - - for k6_instance in self.k6_instances: - k6_instance.wait_until_finished(self.load_params.load_time + int(K6_TEARDOWN_PERIOD)) + self.runner.wait_until_finish() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Run verify scenario for background load") + @reporter.step_deco("Run verify scenario") def _run_verify_scenario(self): if self.verification_params.verify_time is None: raise RuntimeError("verify_time should not be none") - self._prepare(self.verification_params) - with reporter.step("Run verify background load data"): - for k6_verify_instance in self.k6_instances: - k6_verify_instance.start() - k6_verify_instance.wait_until_finished(self.verification_params.verify_time) + self.runner.init_k6_instances(self.verification_params, self.endpoints, self.k6_dir) + with reporter.step("Run verify load data"): + self.runner.start() + self.runner.wait_until_finish() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("K6 run results") - def get_results(self) -> dict: - results = {} - for k6_instance in self.k6_instances: - if k6_instance.load_params.k6_process_allocation_strategy is None: - raise RuntimeError("k6_process_allocation_strategy should not be none") - - result = k6_instance.get_results() - keys_map = { - K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.load_node, - K6ProcessAllocationStrategy.PER_ENDPOINT: k6_instance.endpoints[0], - } - key = keys_map[k6_instance.load_params.k6_process_allocation_strategy] - results[key] = result - - return results + @reporter.step_deco("Get load results") + def _get_results(self) -> dict: + return 
self.runner.get_results() diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 1084552..6126f9d 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -23,7 +23,7 @@ class ClusterStateController: self.stopped_nodes: list[ClusterNode] = [] self.detached_disks: dict[str, DiskController] = {} self.stopped_storage_nodes: list[ClusterNode] = [] - self.stopped_s3_gate: list[ClusterNode] = [] + self.stopped_s3_gates: list[ClusterNode] = [] self.cluster = cluster self.shell = shell self.suspended_services: dict[str, list[ClusterNode]] = {} @@ -60,6 +60,16 @@ class ClusterStateController: for node in nodes: self.stop_storage_service(node) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop all S3 gates on cluster") + def stop_all_s3_gates(self, reversed_order: bool = False): + nodes = ( + reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + ) + + for node in nodes: + self.stop_s3_gate(node) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start host of node {node}") def start_node_host(self, node: ClusterNode): @@ -72,10 +82,18 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start stopped hosts") def start_stopped_hosts(self, reversed_order: bool = False): + if not self.stopped_nodes: + return + nodes = reversed(self.stopped_nodes) if reversed_order else self.stopped_nodes for node in nodes: with reporter.step(f"Start host {node.host.config.address}"): node.host.start_host() + if node in self.stopped_storage_nodes: + self.stopped_storage_nodes.remove(node) + + if node in self.stopped_s3_gates: + self.stopped_s3_gates.remove(node) self.stopped_nodes = [] wait_all_storage_nodes_returned(self.shell, self.cluster) @@ -115,44 +133,51 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start stopped storage services") def start_stopped_storage_services(self): - if self.stopped_storage_nodes: - # In case if we stopped couple services, for example (s01-s04): - # After starting only s01, it may require connections to s02-s04, which is still down, and fail to start. - # Also, if something goes wrong here, we might skip s02-s04 start at all, and cluster will be left in a bad state. - # So in order to make sure that services are at least attempted to be started, using threads here. - with ThreadPoolExecutor(max_workers=len(self.stopped_storage_nodes)) as executor: - start_result = executor.map(self.start_storage_service, self.stopped_storage_nodes) + if not self.stopped_storage_nodes: + return - # Looks tricky, but if exception is raised in any thread, it will be "eaten" by ThreadPoolExecutor, - # But will be thrown here. - # Not ideal solution, but okay for now - for _ in start_result: - pass + # In case if we stopped couple services, for example (s01-s04): + # After starting only s01, it may require connections to s02-s04, which is still down, and fail to start. + # Also, if something goes wrong here, we might skip s02-s04 start at all, and cluster will be left in a bad state. + # So in order to make sure that services are at least attempted to be started, using threads here. 
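# Note: Executor.map returns a lazy iterator, so an exception raised in a worker
# thread surfaces only when the corresponding result is consumed; the otherwise
# pointless "for _ in start_result: pass" loop below exists to drain that
# iterator and re-raise the first worker failure.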
+ with ThreadPoolExecutor(max_workers=len(self.stopped_storage_nodes)) as executor: + start_result = executor.map(self.start_storage_service, self.stopped_storage_nodes) + + # Looks tricky, but if exception is raised in any thread, it will be "eaten" by ThreadPoolExecutor, + # But will be thrown here. + # Not ideal solution, but okay for now + for _ in start_result: + pass wait_all_storage_nodes_returned(self.shell, self.cluster) self.stopped_storage_nodes = [] - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop s3 gate on {node}") def stop_s3_gate(self, node: ClusterNode): node.s3_gate.stop_service() - self.stopped_s3_gate.append(node) + self.stopped_s3_gates.append(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start s3 gate on {node}") def start_s3_gate(self, node: ClusterNode): node.s3_gate.start_service() - self.stopped_s3_gate.remove(node) - + self.stopped_s3_gates.remove(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start stopped S3 gates") - def start_stopped_s3_gate(self): - # not sure if we need here to use threads like in start_stopped_storage_services - for s3_gate in self.stopped_s3_gate: - s3_gate.start_service() - self.stopped_s3_gate = [] + def start_stopped_s3_gates(self): + if not self.stopped_s3_gates: + return + + with ThreadPoolExecutor(max_workers=len(self.stopped_s3_gates)) as executor: + start_result = executor.map(self.start_s3_gate, self.stopped_s3_gates) + + # Looks tricky, but if exception is raised in any thread, it will be "eaten" by ThreadPoolExecutor, + # But will be thrown here. + # Not ideal solution, but okay for now + for _ in start_result: + pass @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Suspend {process_name} service in {node}") From 889e108be9d3fb923eb7d43bbd4c0cc0283b03a8 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 27 Jun 2023 11:47:42 +0300 Subject: [PATCH 012/274] Update epoch align check Signed-off-by: Andrey Berezin --- src/frostfs_testlib/steps/epoch.py | 18 ++++++++---------- .../controllers/cluster_state_controller.py | 11 ----------- 2 files changed, 8 insertions(+), 21 deletions(-) diff --git a/src/frostfs_testlib/steps/epoch.py b/src/frostfs_testlib/steps/epoch.py index 0d40f8d..a589569 100644 --- a/src/frostfs_testlib/steps/epoch.py +++ b/src/frostfs_testlib/steps/epoch.py @@ -54,16 +54,14 @@ def ensure_fresh_epoch( return epoch -@reporter.step_deco("Wait for epochs align in whole cluster") -@wait_for_success(60, 5) -def wait_for_epochs_align(shell: Shell, cluster: Cluster) -> None: - epochs = [] - for node in cluster.services(StorageNode): - epochs.append(get_epoch(shell, cluster, node)) - unique_epochs = list(set(epochs)) - assert ( - len(unique_epochs) == 1 - ), f"unaligned epochs found, {epochs}, count of unique epochs {len(unique_epochs)}" +@reporter.step_deco("Wait up to {timeout} seconds for nodes on cluster to align epochs") +def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60): + @wait_for_success(timeout, 5, None, True) + def check_epochs(): + epochs_by_node = get_epochs_from_nodes(shell, cluster) + assert len(set(epochs_by_node.values())) == 1, f"unaligned epochs found: {epochs_by_node}" + + check_epochs() @reporter.step_deco("Get Epoch") diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 6126f9d..c73a8f4 100644 --- 
a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -220,17 +220,6 @@ class ClusterStateController: wait_for_host_online(self.shell, node.storage_node) wait_for_node_online(node.storage_node) - @reporter.step_deco("Wait up to {timeout} seconds for nodes on cluster to align epochs") - def wait_for_epochs_align(self, timeout=60): - @wait_for_success(timeout, 5, None, True) - def check_epochs(): - epochs_by_node = epoch.get_epochs_from_nodes(self.shell, self.cluster) - assert ( - len(set(epochs_by_node.values())) == 1 - ), f"unaligned epochs found: {epochs_by_node}" - - check_epochs() - def _get_disk_controller( self, node: StorageNode, device: str, mountpoint: str ) -> DiskController: From ac28df2652a1cc875ab84c755191db30d4f1a242 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Mon, 26 Jun 2023 16:48:45 +0300 Subject: [PATCH 013/274] Removed --bearer_rules parameter from init s3 credentials due to changes in 1.3 --- src/frostfs_testlib/cli/frostfs_authmate/secret.py | 1 - src/frostfs_testlib/steps/s3/s3_helper.py | 2 -- 2 files changed, 3 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_authmate/secret.py b/src/frostfs_testlib/cli/frostfs_authmate/secret.py index ba5b5f5..5f300bc 100644 --- a/src/frostfs_testlib/cli/frostfs_authmate/secret.py +++ b/src/frostfs_testlib/cli/frostfs_authmate/secret.py @@ -44,7 +44,6 @@ class FrostfsAuthmateSecret(CliCommand): wallet: str, wallet_password: str, peer: str, - bearer_rules: str, gate_public_key: Union[str, list[str]], address: Optional[str] = None, container_id: Optional[str] = None, diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index d6c2095..ae27124 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -189,7 +189,6 @@ def init_s3_credentials( wallet: WalletInfo, shell: Shell, cluster: Cluster, - s3_bearer_rules_file: str, policy: Optional[dict] = None, s3gates: Optional[list[S3Gate]] = None, ): @@ -203,7 +202,6 @@ def init_s3_credentials( issue_secret_output = frostfs_authmate_exec.secret.issue( wallet=wallet.path, peer=cluster.default_rpc_endpoint, - bearer_rules=s3_bearer_rules_file, gate_public_key=gate_public_keys, wallet_password=wallet.password, container_policy=policy, From 05ac39248504927a246e2a1e4049469bf3eed9b6 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 27 Jun 2023 17:06:01 +0300 Subject: [PATCH 014/274] Remove deleted parameter Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/runners.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index 6f9d046..d8758f6 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -77,7 +77,6 @@ class DefaultRunner(ScenarioRunner): issue_secret_output = frostfs_authmate_exec.secret.issue( wallet=self.loaders_wallet.path, peer=grpc_peer, - bearer_rules=f"{k6_dir}/scenarios/files/rules.json", gate_public_key=s3_public_keys, container_placement_policy=load_params.preset.container_placement_policy, container_policy=f"{k6_dir}/scenarios/files/policy.json", From a14b082a4da57dd3346901951d95dae45912f2ed Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 4 Jul 2023 19:25:24 +0300 Subject: [PATCH 015/274] Make load things parallel Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/interfaces.py | 4 + src/frostfs_testlib/load/k6.py | 83 +++--- 
src/frostfs_testlib/load/load_report.py | 8 +- src/frostfs_testlib/load/runners.py | 254 +++++++++--------- .../controllers/background_load_controller.py | 12 +- src/frostfs_testlib/testing/__init__.py | 2 + src/frostfs_testlib/testing/parallel.py | 98 +++++++ src/frostfs_testlib/utils/__init__.py | 1 + 8 files changed, 284 insertions(+), 178 deletions(-) create mode 100644 src/frostfs_testlib/testing/__init__.py create mode 100644 src/frostfs_testlib/testing/parallel.py diff --git a/src/frostfs_testlib/load/interfaces.py b/src/frostfs_testlib/load/interfaces.py index fbbc20b..6f29868 100644 --- a/src/frostfs_testlib/load/interfaces.py +++ b/src/frostfs_testlib/load/interfaces.py @@ -39,6 +39,10 @@ class ScenarioRunner(ABC): def stop(self): """Stop K6 instances""" + @abstractmethod + def preset(self): + """Run preset for load""" + @property @abstractmethod def is_running(self) -> bool: diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index ca3f696..7ec3c21 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -72,58 +72,58 @@ class K6: def process_dir(self) -> str: return self._k6_process.process_dir - @reporter.step_deco("Preset containers and objects") def preset(self) -> str: - preset_grpc = f"{self._k6_dir}/scenarios/preset/preset_grpc.py" - preset_s3 = f"{self._k6_dir}/scenarios/preset/preset_s3.py" - preset_map = { - LoadType.gRPC: preset_grpc, - LoadType.S3: preset_s3, - LoadType.HTTP: preset_grpc, - } + with reporter.step(f"Run preset on loader {self.loader.ip} for endpoints {self.endpoints}"): + preset_grpc = f"{self._k6_dir}/scenarios/preset/preset_grpc.py" + preset_s3 = f"{self._k6_dir}/scenarios/preset/preset_s3.py" + preset_map = { + LoadType.gRPC: preset_grpc, + LoadType.S3: preset_s3, + LoadType.HTTP: preset_grpc, + } - base_args = { - preset_grpc: [ - preset_grpc, - f"--endpoint {self.endpoints[0]}", - f"--wallet {self.wallet.path} ", - f"--config {self.wallet.config_path} ", - ], - preset_s3: [ - preset_s3, - f"--endpoint {self.endpoints[0]}", - ], - } + base_args = { + preset_grpc: [ + preset_grpc, + f"--endpoint {','.join(self.endpoints)}", + f"--wallet {self.wallet.path} ", + f"--config {self.wallet.config_path} ", + ], + preset_s3: [ + preset_s3, + f"--endpoint {','.join(self.endpoints)}", + ], + } - preset_scenario = preset_map[self.load_params.load_type] - command_args = base_args[preset_scenario].copy() + preset_scenario = preset_map[self.load_params.load_type] + command_args = base_args[preset_scenario].copy() - command_args += [ - f"--{field.metadata['preset_argument']} '{getattr(self.load_params, field.name)}'" - for field in fields(self.load_params) - if field.metadata - and self.scenario in field.metadata["applicable_scenarios"] - and field.metadata["preset_argument"] - and getattr(self.load_params, field.name) is not None - ] - - if self.load_params.preset: command_args += [ - f"--{field.metadata['preset_argument']} '{getattr(self.load_params.preset, field.name)}'" - for field in fields(self.load_params.preset) + f"--{field.metadata['preset_argument']} '{getattr(self.load_params, field.name)}'" + for field in fields(self.load_params) if field.metadata and self.scenario in field.metadata["applicable_scenarios"] and field.metadata["preset_argument"] - and getattr(self.load_params.preset, field.name) is not None + and getattr(self.load_params, field.name) is not None ] - command = " ".join(command_args) - result = self.shell.exec(command) + if self.load_params.preset: + command_args += [ + 
f"--{field.metadata['preset_argument']} '{getattr(self.load_params.preset, field.name)}'" + for field in fields(self.load_params.preset) + if field.metadata + and self.scenario in field.metadata["applicable_scenarios"] + and field.metadata["preset_argument"] + and getattr(self.load_params.preset, field.name) is not None + ] - assert ( - result.return_code == EXIT_RESULT_CODE - ), f"Return code of preset is not zero: {result.stdout}" - return result.stdout.strip("\n") + command = " ".join(command_args) + result = self.shell.exec(command) + + assert ( + result.return_code == EXIT_RESULT_CODE + ), f"Return code of preset is not zero: {result.stdout}" + return result.stdout.strip("\n") @reporter.step_deco("Generate K6 command") def _generate_env_variables(self) -> str: @@ -232,7 +232,6 @@ class K6: self._wait_until_process_end() - @property def is_running(self) -> bool: if self._k6_process: return self._k6_process.running() diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index 7f912e4..dcd81b4 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -43,8 +43,10 @@ class LoadReport: return html def _get_load_params_section_html(self) -> str: - params: str = yaml.safe_dump(self.load_test, sort_keys=False) - params = params.replace("\n", "
") + params: str = yaml.safe_dump( + [self.load_test], sort_keys=False, indent=2, explicit_start=True + ) + params = params.replace("\n", "
").replace(" ", " ") section_html = f"""

Scenario params

{params}
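Illustratively (the parameter values here are invented), the new dump renders the test as a one-element YAML list with an explicit document start, and the added escaping keeps the YAML indentation intact in HTML:

    import yaml

    params = yaml.safe_dump(
        [{"load_type": "grpc", "writers": 8}], sort_keys=False, indent=2, explicit_start=True
    )
    # params == "---\n- load_type: grpc\n  writers: 8\n"
    html_ready = params.replace("\n", "<br>").replace(" ", "&nbsp;")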
@@ -139,7 +141,7 @@ class LoadReport:
         duration = self._seconds_to_formatted_duration(self.load_params.load_time)
         model = self._get_model_string()
         # write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s
-        short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {vus_str} {model} - {throughput:.2f}{unit} {total_rate:.2f}/s"
+        short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {vus_str} {model} - {throughput:.2f}{unit}/s {total_rate:.2f}/s"
 
         html = f"""
diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py
index d8758f6..d6cf2ae 100644
--- a/src/frostfs_testlib/load/runners.py
+++ b/src/frostfs_testlib/load/runners.py
@@ -28,15 +28,31 @@ from frostfs_testlib.storage.cluster import ClusterNode
 from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
 from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
-from frostfs_testlib.testing.test_control import run_optionally
-from frostfs_testlib.utils import datetime_utils
-from frostfs_testlib.utils.file_keeper import FileKeeper
+from frostfs_testlib.testing import parallel, run_optionally
+from frostfs_testlib.utils import FileKeeper, datetime_utils
 
 reporter = get_reporter()
 
 
-class DefaultRunner(ScenarioRunner):
+class RunnerBase(ScenarioRunner):
     k6_instances: list[K6]
+
+    @reporter.step_deco("Run preset on loaders")
+    def preset(self):
+        parallel([k6.preset for k6 in self.k6_instances])
+
+    @reporter.step_deco("Wait until load finish")
+    def wait_until_finish(self):
+        parallel([k6.wait_until_finished for k6 in self.k6_instances])
+
+    @property
+    def is_running(self):
+        futures = parallel([k6.is_running for k6 in self.k6_instances])
+
+        return any([future.result() for future in futures])
+
+
+class DefaultRunner(RunnerBase):
     loaders: list[Loader]
     loaders_wallet: WalletInfo
 
@@ -51,7 +67,7 @@ class DefaultRunner(ScenarioRunner):
         self.loaders_wallet = loaders_wallet
 
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step_deco("Prepare load instances")
+    @reporter.step_deco("Preparation steps")
     def prepare(
         self,
         load_params: LoadParams,
@@ -68,48 +84,52 @@ class DefaultRunner(ScenarioRunner):
             ]
             grpc_peer = storage_node.get_rpc_endpoint()
 
-            for loader in self.loaders:
-                with reporter.step(f"Init s3 client on {loader.ip}"):
-                    shell = loader.get_shell()
-                    frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(
-                        shell, FROSTFS_AUTHMATE_EXEC
-                    )
-                    issue_secret_output = frostfs_authmate_exec.secret.issue(
-                        wallet=self.loaders_wallet.path,
-                        peer=grpc_peer,
-                        gate_public_key=s3_public_keys,
-                        container_placement_policy=load_params.preset.container_placement_policy,
-                        container_policy=f"{k6_dir}/scenarios/files/policy.json",
-                        wallet_password=self.loaders_wallet.password,
-                    ).stdout
-                    aws_access_key_id = str(
-                        re.search(
-                            r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output
-                        ).group("aws_access_key_id")
-                    )
-                    aws_secret_access_key = str(
-                        re.search(
-                            r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)",
-                            issue_secret_output,
-                        ).group("aws_secret_access_key")
-                    )
+            parallel(
+                self._prepare_loader, self.loaders, load_params, grpc_peer, s3_public_keys, k6_dir
+            )
 
-                    configure_input = [
-                        InteractiveInput(
-                            prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id
-                        ),
-                        InteractiveInput(
-                            prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key
-                        ),
-                        InteractiveInput(prompt_pattern=r".*", input=""),
-                        InteractiveInput(prompt_pattern=r".*", input=""),
-                    ]
-                    shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
+    def _prepare_loader(
+        self,
+        loader: Loader,
+        load_params: LoadParams,
+        grpc_peer: str,
+        s3_public_keys: list[str],
+        k6_dir: str,
+    ):
+        with reporter.step(f"Init s3 client on {loader.ip}"):
+            shell = loader.get_shell()
+            frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
+            issue_secret_output = frostfs_authmate_exec.secret.issue(
+                wallet=self.loaders_wallet.path,
+                peer=grpc_peer,
+                gate_public_key=s3_public_keys,
+                container_placement_policy=load_params.preset.container_placement_policy,
+                container_policy=f"{k6_dir}/scenarios/files/policy.json",
+                wallet_password=self.loaders_wallet.password,
+            ).stdout
+            aws_access_key_id = str(
+                re.search(
+                    r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output
+                ).group("aws_access_key_id")
+            )
+            aws_secret_access_key = str(
+                re.search(
+                    r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)",
+                    issue_secret_output,
+                ).group("aws_secret_access_key")
+            )
 
-    def wait_until_finish(self):
-        for k6_instance in self.k6_instances:
-            k6_instance.wait_until_finished()
+            configure_input = [
+                InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
+                InteractiveInput(
+                    prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key
+                ),
+                InteractiveInput(prompt_pattern=r".*", input=""),
+                InteractiveInput(prompt_pattern=r".*", input=""),
+            ]
+            shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
 
+    @reporter.step_deco("Init k6 instances")
     def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
         self.k6_instances = []
         cycled_loaders = itertools.cycle(self.loaders)
@@ -131,29 +151,32 @@ class DefaultRunner(ScenarioRunner):
             load_params, k6_processes_count
         )
 
-        for distributed_load_params in distributed_load_params_list:
-            loader = next(cycled_loaders)
-            shell = loader.get_shell()
-            with reporter.step(
-                f"Init K6 instances on {loader.ip} for load id {distributed_load_params.load_id}"
-            ):
-                with reporter.step(f"Make working directory"):
-                    shell.exec(f"sudo mkdir -p {distributed_load_params.working_dir}")
-                    shell.exec(
-                        f"sudo chown {LOAD_NODE_SSH_USER} {distributed_load_params.working_dir}"
-                    )
+        futures = parallel(
+            self._init_k6_instance,
+            distributed_load_params_list,
+            loader=cycled_loaders,
+            endpoints=endpoints_gen,
+            k6_dir=k6_dir,
+        )
+        self.k6_instances = [future.result() for future in futures]
 
-            k6_instance = K6(
-                distributed_load_params,
-                next(endpoints_gen),
-                k6_dir,
-                shell,
-                loader,
-                self.loaders_wallet,
-            )
-            self.k6_instances.append(k6_instance)
-            if load_params.preset:
-                k6_instance.preset()
+    def _init_k6_instance(
+        self, load_params_for_loader: LoadParams, loader: Loader, endpoints: list[str], k6_dir: str
+    ):
+        shell = loader.get_shell()
+        with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"):
+            with reporter.step(f"Make working directory"):
+                shell.exec(f"sudo mkdir -p {load_params_for_loader.working_dir}")
+                shell.exec(f"sudo chown {LOAD_NODE_SSH_USER} {load_params_for_loader.working_dir}")
+
+        return K6(
+            load_params_for_loader,
+            endpoints,
+            k6_dir,
+            shell,
+            loader,
+            self.loaders_wallet,
+        )
 
     def _get_distributed_load_params_list(
         self, original_load_params: LoadParams, workers_count: int
@@ -215,15 +238,7 @@ class DefaultRunner(ScenarioRunner):
     def start(self):
load_params = self.k6_instances[0].load_params - with ThreadPoolExecutor(max_workers=len(self.k6_instances)) as executor: - futures = [executor.submit(k6.start) for k6 in self.k6_instances] - - # Check for exceptions - exceptions = [future.exception() for future in futures if future.exception()] - if exceptions: - raise RuntimeError( - f"The following exceptions occured during start of k6: {exceptions}" - ) + parallel([k6.start for k6 in self.k6_instances]) wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5 with reporter.step( @@ -251,17 +266,8 @@ class DefaultRunner(ScenarioRunner): return results - @property - def is_running(self): - for k6_instance in self.k6_instances: - if not k6_instance.is_running: - return False - return True - - -class LocalRunner(ScenarioRunner): - k6_instances: list[K6] +class LocalRunner(RunnerBase): loaders: list[Loader] cluster_state_controller: ClusterStateController file_keeper: FileKeeper @@ -278,7 +284,7 @@ class LocalRunner(ScenarioRunner): self.loaders = [NodeLoader(node) for node in nodes_under_load] @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Prepare load instances") + @reporter.step_deco("Preparation steps") def prepare( self, load_params: LoadParams, @@ -319,37 +325,39 @@ class LocalRunner(ScenarioRunner): for _ in result: pass - def wait_until_finish(self): - for k6_instance in self.k6_instances: - k6_instance.wait_until_finished() - + @reporter.step_deco("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): self.k6_instances = [] - for loader in self.loaders: - shell = loader.get_shell() - with reporter.step(f"Init K6 instances on {loader.ip}"): - with reporter.step(f"Make working directory"): - shell.exec(f"sudo mkdir -p {load_params.working_dir}") - # If we chmod /home/ folder we can no longer ssh to the node - # !! IMPORTANT !! - if ( - load_params.working_dir - and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}" - and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/" - ): - shell.exec(f"sudo chmod -R 777 {load_params.working_dir}") + futures = parallel( + self._init_k6_instance, + self.loaders, + load_params, + k6_dir, + ) + self.k6_instances = [future.result() for future in futures] - k6_instance = K6( - load_params, - ["localhost:8080"], - k6_dir, - shell, - loader, - self.wallet, - ) - self.k6_instances.append(k6_instance) - if load_params.preset: - k6_instance.preset() + def _init_k6_instance(self, loader: Loader, load_params: LoadParams, k6_dir: str): + shell = loader.get_shell() + with reporter.step(f"Init K6 instance on {loader.ip}"): + with reporter.step(f"Make working directory"): + shell.exec(f"sudo mkdir -p {load_params.working_dir}") + # If we chmod /home/ folder we can no longer ssh to the node + # !! IMPORTANT !! 
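# Note: with sshd's default StrictModes, a world-writable home directory fails
# the permission checks on the login user's home and ~/.ssh, so a recursive
# chmod 777 over /home/<user> would lock SSH out of the node entirely; hence
# the explicit guard below before relaxing permissions on the working directory.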
+                if (
+                    load_params.working_dir
+                    and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}"
+                    and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/"
+                ):
+                    shell.exec(f"sudo chmod -R 777 {load_params.working_dir}")
+
+        return K6(
+            load_params,
+            ["localhost:8080"],
+            k6_dir,
+            shell,
+            loader,
+            self.wallet,
+        )
 
     def start(self):
         load_params = self.k6_instances[0].load_params
 
@@ -357,15 +365,7 @@ class LocalRunner(ScenarioRunner):
         self.cluster_state_controller.stop_all_s3_gates()
         self.cluster_state_controller.stop_all_storage_services()
 
-        with ThreadPoolExecutor(max_workers=len(self.k6_instances)) as executor:
-            futures = [executor.submit(k6.start) for k6 in self.k6_instances]
-
-            # Check for exceptions
-            exceptions = [future.exception() for future in futures if future.exception()]
-            if exceptions:
-                raise RuntimeError(
-                    f"The following exceptions occured during start of k6: {exceptions}"
-                )
+        parallel([k6.start for k6 in self.k6_instances])
 
         wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5
         with reporter.step(
@@ -387,11 +387,3 @@ class LocalRunner(ScenarioRunner):
             results[k6_instance.loader.ip] = result
 
         return results
-
-    @property
-    def is_running(self):
-        for k6_instance in self.k6_instances:
-            if not k6_instance.is_running:
-                return False
-
-        return True
diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py
index 6cedd0f..ac3a920 100644
--- a/src/frostfs_testlib/storage/controllers/background_load_controller.py
+++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py
@@ -80,14 +80,17 @@ class BackgroundLoadController:
             LoadType.S3: {
                 EndpointSelectionStrategy.ALL: list(
                     set(
-                        endpoint.replace("http://", "")
+                        endpoint.replace("http://", "").replace("https://", "")
                         for node_under_load in self.nodes_under_load
                         for endpoint in node_under_load.service(S3Gate).get_all_endpoints()
                     )
                 ),
                 EndpointSelectionStrategy.FIRST: list(
                     set(
-                        node_under_load.service(S3Gate).get_endpoint().replace("http://", "")
+                        node_under_load.service(S3Gate)
+                        .get_endpoint()
+                        .replace("http://", "")
+                        .replace("https://", "")
                         for node_under_load in self.nodes_under_load
                     )
                 ),
@@ -131,8 +134,13 @@ class BackgroundLoadController:
     @reporter.step_deco("Startup load")
     def startup(self):
         self.prepare()
+        self.preset()
         self.start()
 
+    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
+    def preset(self):
+        self.runner.preset()
+
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
     @reporter.step_deco("Stop and get results of load")
     def teardown(self, load_report: Optional[LoadReport] = None):
diff --git a/src/frostfs_testlib/testing/__init__.py b/src/frostfs_testlib/testing/__init__.py
new file mode 100644
index 0000000..3483972
--- /dev/null
+++ b/src/frostfs_testlib/testing/__init__.py
@@ -0,0 +1,2 @@
+from frostfs_testlib.testing.parallel import parallel
+from frostfs_testlib.testing.test_control import expect_not_raises, run_optionally, wait_for_success
diff --git a/src/frostfs_testlib/testing/parallel.py b/src/frostfs_testlib/testing/parallel.py
new file mode 100644
index 0000000..7f4ee26
--- /dev/null
+++ b/src/frostfs_testlib/testing/parallel.py
@@ -0,0 +1,98 @@
+import itertools
+from concurrent.futures import Future, ThreadPoolExecutor
+from typing import Callable, Collection, Optional, Union
+
+
+def parallel(
+    fn: Union[Callable, list[Callable]],
+    parallel_items: Optional[Collection] = None,
+    *args,
+    **kwargs,
+) -> list[Future]:
+    """Parallel execution of a selected function or a list of functions using ThreadPoolExecutor.
+    Also checks the exceptions of each thread.
+
+    Args:
+        fn: function(s) to run. Can work in 2 modes:
+            1. If you have a dedicated function with some items to process in parallel,
+            like you do with executor.map(fn, parallel_items), pass this function as fn.
+            2. If you need to process each item with its own method, like you do
+            with executor.submit(fn, args, kwargs), pass a list of methods here.
+            See examples in runners.py in this repo.
+        parallel_items: items to iterate on (should be None in case of 2nd mode).
+        args: any other args required in target function(s).
+            if any arg is itertools.cycle, it will be iterated before passing to a new thread.
+        kwargs: any other kwargs required in target function(s)
+            if any kwarg is itertools.cycle, it will be iterated before passing to a new thread.
+
+    Returns:
+        list of futures.
+    """
+
+    if callable(fn):
+        if not parallel_items:
+            raise RuntimeError("Parallel items should not be None when fn is callable.")
+        futures = _run_by_items(fn, parallel_items, *args, **kwargs)
+    elif isinstance(fn, list):
+        futures = _run_by_fn_list(fn, *args, **kwargs)
+    else:
+        raise RuntimeError("Nothing to run. fn should be either callable or list of callables.")
+
+    # Check for exceptions
+    exceptions = [future.exception() for future in futures if future.exception()]
+    if exceptions:
+        message = "\n".join([str(e) for e in exceptions])
+        raise RuntimeError(f"The following exceptions occurred during parallel run: {message}")
+    return futures
+
+
+def _run_by_fn_list(fn_list: list[Callable], *args, **kwargs) -> list[Future]:
+    if not len(fn_list):
+        return []
+    if not all([callable(f) for f in fn_list]):
+        raise RuntimeError("fn_list should contain only callables")
+
+    futures: list[Future] = []
+
+    with ThreadPoolExecutor(max_workers=len(fn_list)) as executor:
+        for fn in fn_list:
+            task_args = _get_args(*args)
+            task_kwargs = _get_kwargs(**kwargs)
+
+            futures.append(executor.submit(fn, *task_args, **task_kwargs))
+
+    return futures
+
+
+def _run_by_items(fn: Callable, parallel_items: Collection, *args, **kwargs) -> list[Future]:
+    futures: list[Future] = []
+
+    with ThreadPoolExecutor(max_workers=len(parallel_items)) as executor:
+        for item in parallel_items:
+            task_args = _get_args(*args)
+            task_kwargs = _get_kwargs(**kwargs)
+            task_args.insert(0, item)
+
+            futures.append(executor.submit(fn, *task_args, **task_kwargs))
+
+    return futures
+
+
+def _get_kwargs(**kwargs):
+    actkwargs = {}
+    for key, arg in kwargs.items():
+        if isinstance(arg, itertools.cycle):
+            actkwargs[key] = next(arg)
+        else:
+            actkwargs[key] = arg
+    return actkwargs
+
+
+def _get_args(*args):
+    actargs = []
+    for arg in args:
+        if isinstance(arg, itertools.cycle):
+            actargs.append(next(arg))
+        else:
+            actargs.append(arg)
+    return actargs
diff --git a/src/frostfs_testlib/utils/__init__.py b/src/frostfs_testlib/utils/__init__.py
index fbc4a8f..01cf462 100644
--- a/src/frostfs_testlib/utils/__init__.py
+++ b/src/frostfs_testlib/utils/__init__.py
@@ -3,3 +3,4 @@ import frostfs_testlib.utils.datetime_utils
 import frostfs_testlib.utils.json_utils
 import frostfs_testlib.utils.string_utils
 import frostfs_testlib.utils.wallet_utils
+from frostfs_testlib.utils.file_keeper import FileKeeper
From f8409fa9f96e28385dcbb330bb09831ebef6e0f6 Mon Sep 17 00:00:00 2001
From: Dmitriy Zayakin
Date: Wed, 5 Jul 2023 09:21:25 +0300
Subject: [PATCH 016/274] Change Inner Ring metric name

Signed-off-by: Dmitriy Zayakin
---
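Note: the Inner Ring health gauge is scraped from the node-local metrics
endpoint on port 6662, and this patch only renames the metric that the
healthcheck greps for, presumably because the Inner Ring service now reports
under its own frostfs_ir_* namespace instead of frostfs_node_*. A minimal
standalone sketch of the same check follows (an illustration only, not part of
the diff; it assumes the standard Prometheus text exposition format, where
`sed 1,2d` drops the matched `# HELP`/`# TYPE` comment lines and leaves the
sample line):

    import subprocess

    def ir_is_healthy() -> bool:
        # after grep/sed a single sample line remains, e.g. "frostfs_ir_ir_health 1"
        cmd = "curl -s localhost:6662 | grep frostfs_ir_ir_health | sed 1,2d"
        output = subprocess.check_output(cmd, shell=True, text=True).strip()
        return output.split()[-1] == "1" if output else False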
src/frostfs_testlib/storage/dataclasses/frostfs_services.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 2b52c1f..23e3335 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -16,7 +16,7 @@ class InnerRing(NodeBase): """ def service_healthcheck(self) -> bool: - health_metric = "frostfs_node_ir_health" + health_metric = "frostfs_ir_ir_health" output = ( self.host.get_shell() .exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d") @@ -167,7 +167,7 @@ class StorageNode(NodeBase): def get_un_locode(self): return self._get_attribute(ConfigAttributes.UN_LOCODE) - + def get_data_directory(self) -> str: return self.host.get_data_directory(self.name) From 3050ccc9fa5597a398b6f81999de54f264d4d443 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Thu, 6 Jul 2023 14:20:23 +0300 Subject: [PATCH 017/274] Added -k parameter to curl to ignore self signed SSL certificate --- src/frostfs_testlib/steps/http/http_gate.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py index 64bb5ce..e0ae8fa 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -215,13 +215,13 @@ def upload_via_http_gate_curl( # pre-clean _cmd_run("rm pipe -f") files = f"file=@pipe;filename={os.path.basename(filepath)}" - cmd = f"mkfifo pipe;cat {filepath} > pipe & curl --no-buffer -F '{files}' {attributes} {request}" + cmd = f"mkfifo pipe;cat {filepath} > pipe & curl -k --no-buffer -F '{files}' {attributes} {request}" output = _cmd_run(cmd, LONG_TIMEOUT) # clean up pipe _cmd_run("rm pipe") else: files = f"file=@{filepath};filename={os.path.basename(filepath)}" - cmd = f"curl -F '{files}' {attributes} {request}" + cmd = f"curl -k -F '{files}' {attributes} {request}" output = _cmd_run(cmd) if error_pattern: @@ -246,7 +246,7 @@ def get_via_http_curl(cid: str, oid: str, endpoint: str) -> str: request = f"{endpoint}/get/{cid}/{oid}" file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}") - cmd = f"curl {request} > {file_path}" + cmd = f"curl -k {request} > {file_path}" _cmd_run(cmd) return file_path From 14c85e0a9e8e288a0b4599843a7f803df9268ea2 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Fri, 7 Jul 2023 16:41:59 +0300 Subject: [PATCH 018/274] Added verify=False to requests calls to ignore self signed SSL certificate --- src/frostfs_testlib/steps/http/http_gate.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py index e0ae8fa..efc5258 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -49,7 +49,7 @@ def get_via_http_gate( else: request = f"{endpoint}{request_path}" - resp = requests.get(request, stream=True, timeout=timeout) + resp = requests.get(request, stream=True, timeout=timeout, verify=False) if not resp.ok: raise Exception( @@ -77,7 +77,7 @@ def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, timeout: Optiona endpoint: http gate endpoint """ request = f"{endpoint}/zip/{cid}/{prefix}" - resp = requests.get(request, stream=True, timeout=timeout) + resp = requests.get(request, stream=True, 
timeout=timeout, verify=False)
 
     if not resp.ok:
         raise Exception(
@@ -123,7 +123,7 @@ def get_via_http_gate_by_attribute(
     else:
         request = f"{endpoint}{request_path}"
 
-    resp = requests.get(request, stream=True, timeout=timeout)
+    resp = requests.get(request, stream=True, timeout=timeout, verify=False)
 
     if not resp.ok:
         raise Exception(
@@ -156,7 +156,7 @@ def upload_via_http_gate(
     request = f"{endpoint}/upload/{cid}"
     files = {"upload_file": open(path, "rb")}
     body = {"filename": path}
-    resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout)
+    resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout, verify=False)
 
     if not resp.ok:
         raise Exception(
From 917dc6f6d8cc22e4df046dd46a951d50dd3268d9 Mon Sep 17 00:00:00 2001
From: sstovbyra
Date: Tue, 11 Jul 2023 18:22:54 +0300
Subject: [PATCH 019/274] add_wait_for_service_to_be_in_state

---
 src/frostfs_testlib/hosting/docker_host.py | 4 ++++
 src/frostfs_testlib/hosting/interfaces.py | 13 +++++++++++++
 2 files changed, 17 insertions(+)

diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py
index 5dcac9e..3934d9f 100644
--- a/src/frostfs_testlib/hosting/docker_host.py
+++ b/src/frostfs_testlib/hosting/docker_host.py
@@ -135,6 +135,10 @@ class DockerHost(Host):
             timeout=service_attributes.start_timeout,
         )
 
+    def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
+
     def get_data_directory(self, service_name: str) -> str:
         service_attributes = self._get_service_attributes(service_name)
         return service_attributes.data_directory_path
diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py
index 8d889da..cdd3379 100644
--- a/src/frostfs_testlib/hosting/interfaces.py
+++ b/src/frostfs_testlib/hosting/interfaces.py
@@ -266,3 +266,16 @@ class Host(ABC):
             True if message found in logs in the given time frame.
             False otherwise.
         """
+
+
+    @abstractmethod
+    def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None:
+        """
+        Waits for service to be in the specified state.
+
+        Args:
+            systemd_service_name: Service whose state to wait for.
+ expected_state: State to wait for + timeout: Seconds to wait + + """ From 59b41579915c3750e3b98e7e859053a1c3bdee0b Mon Sep 17 00:00:00 2001 From: "d.anurin" Date: Wed, 12 Jul 2023 09:41:17 +0300 Subject: [PATCH 020/274] Added sudo parameter for getting shell with elevated rights or not --- src/frostfs_testlib/hosting/docker_host.py | 4 ++-- src/frostfs_testlib/hosting/interfaces.py | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 3934d9f..94ee2ff 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -61,10 +61,10 @@ class ServiceAttributes(ParsedAttributes): class DockerHost(Host): """Manages services hosted in Docker containers running on a local or remote machine.""" - def get_shell(self) -> Shell: + def get_shell(self, sudo: bool = True) -> Shell: host_attributes = HostAttributes.parse(self._config.attributes) command_inspectors = [] - if host_attributes.sudo_shell: + if sudo: command_inspectors.append(SudoInspector()) if not host_attributes.ssh_login: diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index cdd3379..b4f67fb 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -65,9 +65,12 @@ class Host(ABC): return cli_config @abstractmethod - def get_shell(self) -> Shell: + def get_shell(self, sudo: bool = True) -> Shell: """Returns shell to this host. + Args: + sudo: if True, run all commands in shell with elevated rights + Returns: Shell that executes commands on this host. """ From 62216293f8b4abcec73b92d6df3158745c90d7e3 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 14 Jul 2023 16:04:44 +0300 Subject: [PATCH 021/274] Updates for s3 k6 --- src/frostfs_testlib/load/k6.py | 60 +++-------------- src/frostfs_testlib/load/load_config.py | 67 ++++++++++++++++++- src/frostfs_testlib/load/runners.py | 4 +- .../controllers/background_load_controller.py | 7 +- 4 files changed, 80 insertions(+), 58 deletions(-) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index 7ec3c21..cb3576e 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -2,9 +2,10 @@ import json import logging import math import os -from dataclasses import dataclass, fields +from dataclasses import dataclass from time import sleep from typing import Any +from urllib.parse import urlparse from frostfs_testlib.load.interfaces import Loader from frostfs_testlib.load.load_config import ( @@ -16,11 +17,7 @@ from frostfs_testlib.load.load_config import ( from frostfs_testlib.processes.remote_process import RemoteProcess from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.common import STORAGE_USER_NAME -from frostfs_testlib.resources.load_params import ( - K6_STOP_SIGNAL_TIMEOUT, - K6_TEARDOWN_PERIOD, - LOAD_NODE_SSH_USER, -) +from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD from frostfs_testlib.shell import Shell from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.test_control import wait_for_success @@ -60,10 +57,9 @@ class K6: self.loader: Loader = loader self.shell: Shell = shell self.wallet = wallet - self.scenario: LoadScenario = load_params.scenario self.summary_json: str = os.path.join( self.load_params.working_dir, - 
f"{self.load_params.load_id}_{self.scenario.value}_summary.json", + f"{self.load_params.load_id}_{self.load_params.scenario.value}_summary.json", ) self._k6_dir: str = k6_dir @@ -98,24 +94,7 @@ class K6: preset_scenario = preset_map[self.load_params.load_type] command_args = base_args[preset_scenario].copy() - command_args += [ - f"--{field.metadata['preset_argument']} '{getattr(self.load_params, field.name)}'" - for field in fields(self.load_params) - if field.metadata - and self.scenario in field.metadata["applicable_scenarios"] - and field.metadata["preset_argument"] - and getattr(self.load_params, field.name) is not None - ] - - if self.load_params.preset: - command_args += [ - f"--{field.metadata['preset_argument']} '{getattr(self.load_params.preset, field.name)}'" - for field in fields(self.load_params.preset) - if field.metadata - and self.scenario in field.metadata["applicable_scenarios"] - and field.metadata["preset_argument"] - and getattr(self.load_params.preset, field.name) is not None - ] + command_args += self.load_params.get_preset_arguments() command = " ".join(command_args) result = self.shell.exec(command) @@ -127,26 +106,7 @@ class K6: @reporter.step_deco("Generate K6 command") def _generate_env_variables(self) -> str: - env_vars = { - field.metadata["env_variable"]: getattr(self.load_params, field.name) - for field in fields(self.load_params) - if field.metadata - and self.scenario in field.metadata["applicable_scenarios"] - and field.metadata["env_variable"] - and getattr(self.load_params, field.name) is not None - } - - if self.load_params.preset: - env_vars.update( - { - field.metadata["env_variable"]: getattr(self.load_params.preset, field.name) - for field in fields(self.load_params.preset) - if field.metadata - and self.scenario in field.metadata["applicable_scenarios"] - and field.metadata["env_variable"] - and getattr(self.load_params.preset, field.name) is not None - } - ) + env_vars = self.load_params.get_env_vars() env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints) env_vars["SUMMARY_JSON"] = self.summary_json @@ -164,7 +124,7 @@ class K6: ): command = ( f"{self._k6_dir}/k6 run {self._generate_env_variables()} " - f"{self._k6_dir}/scenarios/{self.scenario.value}.js" + f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js" ) user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None self._k6_process = RemoteProcess.create( @@ -215,10 +175,10 @@ class K6: summary_text = self.shell.exec(f"cat {self.summary_json}").stdout summary_json = json.loads(summary_text) - + endpoint = urlparse(self.endpoints[0]).netloc or self.endpoints[0] allure_filenames = { - K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.loader.ip}_{self.scenario.value}_summary.json", - K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.loader.ip}_{self.scenario.value}_{self.endpoints[0]}_summary.json", + K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.loader.ip}_{self.load_params.scenario.value}_summary.json", + K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.loader.ip}_{self.load_params.scenario.value}_{endpoint}_summary.json", } allure_filename = allure_filenames[self.load_params.k6_process_allocation_strategy] diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index c337d7c..357a129 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -1,7 +1,8 @@ import os -from dataclasses import dataclass, field +from dataclasses 
import dataclass, field, fields, is_dataclass from enum import Enum -from typing import Optional +from types import MappingProxyType +from typing import Any, Optional, get_args class LoadType(Enum): @@ -42,6 +43,12 @@ grpc_preset_scenarios = [ s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR] +@dataclass +class MetaField: + metadata: MappingProxyType + value: Any + + def metadata_field( applicable_scenarios: list[LoadScenario], preset_param: Optional[str] = None, @@ -138,6 +145,12 @@ class LoadParams: preset: Optional[Preset] = None # K6 download url k6_url: Optional[str] = None + # No ssl verification flag + no_verify_ssl: Optional[bool] = metadata_field( + [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.VERIFY, LoadScenario.HTTP], + "no-verify-ssl", + "NO_VERIFY_SSL", + ) # ------- COMMON SCENARIO PARAMS ------- # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. @@ -225,3 +238,53 @@ class LoadParams: self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt") if self.preset: self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json") + + def get_env_vars(self): + env_vars = { + meta_field.metadata["env_variable"]: meta_field.value + for meta_field in self._get_meta_fields(self) + if self.scenario in meta_field.metadata["applicable_scenarios"] + and meta_field.metadata["env_variable"] + and meta_field.value + } + + return env_vars + + def get_preset_arguments(self): + command_args = [ + self._get_preset_argument(meta_field) + for meta_field in self._get_meta_fields(self) + if self.scenario in meta_field.metadata["applicable_scenarios"] + and meta_field.metadata["preset_argument"] + and meta_field.value + and self._get_preset_argument(meta_field) + ] + + return command_args + + @staticmethod + def _get_preset_argument(meta_field: MetaField) -> str: + if isinstance(meta_field.value, bool): + # For preset calls, bool values are passed with just -- if the value is True + return f"--{meta_field.metadata['preset_argument']}" if meta_field.value else "" + + return f"--{meta_field.metadata['preset_argument']} '{meta_field.value}'" + + @staticmethod + def _get_meta_fields(instance) -> list[MetaField]: + data_fields = fields(instance) + + fields_with_data = [ + MetaField(field.metadata, getattr(instance, field.name)) + for field in data_fields + if field.metadata and getattr(instance, field.name) + ] + + for field in data_fields: + actual_field_type = ( + get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type) + ) + if is_dataclass(actual_field_type) and getattr(instance, field.name): + fields_with_data += LoadParams._get_meta_fields(getattr(instance, field.name)) + + return fields_with_data or [] diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index d6cf2ae..428cd7d 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -6,6 +6,7 @@ import time from concurrent.futures import ThreadPoolExecutor from dataclasses import fields from typing import Optional +from urllib.parse import urlparse import yaml @@ -257,9 +258,10 @@ class DefaultRunner(RunnerBase): raise RuntimeError("k6_process_allocation_strategy should not be none") result = k6_instance.get_results() + endpoint = urlparse(k6_instance.endpoints[0]).netloc or k6_instance.endpoints[0] keys_map = { K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.loader.ip, - K6ProcessAllocationStrategy.PER_ENDPOINT: k6_instance.endpoints[0], + 
K6ProcessAllocationStrategy.PER_ENDPOINT: endpoint,
        }
        key = keys_map[k6_instance.load_params.k6_process_allocation_strategy]
        results[key] = result
diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py
index ac3a920..58a7a6f 100644
--- a/src/frostfs_testlib/storage/controllers/background_load_controller.py
+++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py
@@ -80,17 +80,14 @@ class BackgroundLoadController:
             LoadType.S3: {
                 EndpointSelectionStrategy.ALL: list(
                     set(
-                        endpoint.replace("http://", "").replace("https://", "")
+                        endpoint
                         for node_under_load in self.nodes_under_load
                         for endpoint in node_under_load.service(S3Gate).get_all_endpoints()
                     )
                 ),
                 EndpointSelectionStrategy.FIRST: list(
                     set(
-                        node_under_load.service(S3Gate)
-                        .get_endpoint()
-                        .replace("http://", "")
-                        .replace("https://", "")
+                        node_under_load.service(S3Gate).get_endpoint()
                         for node_under_load in self.nodes_under_load
                     )
                 ),
From 4896abcec3959ef65c0e04515047510b0aeb951e Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Tue, 18 Jul 2023 20:38:37 +0300
Subject: [PATCH 022/274] Adding code validation targets

Signed-off-by: Andrey Berezin
---
 .gitignore | 1 +
 CONTRIBUTING.md | 4 +-
 Makefile | 34 +++++++++++++---
 pyproject.toml | 5 +++
 requirements.txt | 1 +
 .../analytics/test_collector.py | 39 ++++++++++++-------
 .../analytics/test_exporter.py | 4 +-
 src/frostfs_testlib/hosting/docker_host.py | 18 +++++----
 src/frostfs_testlib/utils/cli_utils.py | 4 +-
 tests/conftest.py | 5 +++
 tests/helpers.py | 6 +--
 11 files changed, 80 insertions(+), 41 deletions(-)
 create mode 100644 tests/conftest.py

diff --git a/.gitignore b/.gitignore
index a7f7de0..e2967ea 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
 # ignore IDE files
 .vscode
 .idea
+venv.*
 
 # ignore temp files under any path
 .DS_Store
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5996820..fdcaec7 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -63,9 +63,9 @@ $ git checkout -b feature/123-something_awesome
 ```
 
 ### Test your changes
-Before submitting any changes to the library, please, make sure that all unit tests are passing. To run the tests, please, use the following command:
+Before submitting any changes to the library, please, make sure that the linter and all unit tests are passing. To run the tests, please, use the following command:
 ```shell
-$ python -m unittest discover --start-directory tests
+$ make validation
 ```
 
 To enable tests that interact with SSH server, please, setup SSH server and set the following environment variables before running the tests:
diff --git a/Makefile b/Makefile
index c746608..9dbd86c 100644
--- a/Makefile
+++ b/Makefile
@@ -3,6 +3,8 @@ PYTHON_VERSION := 3.10
 VENV_DIR := venv.frostfs-testlib
 current_dir := $(shell pwd)
 
+DIRECTORIES := $(sort $(dir $(wildcard ../frostfs-testlib-plugin-*/ ../*-testcases/)))
+FROM_VENV := . 
${VENV_DIR}/bin/activate && venv: create requirements paths precommit @echo Ready @@ -14,14 +16,34 @@ precommit: paths: @echo Append paths for project @echo Virtual environment: ${VENV_DIR} - @sudo rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth - @sudo touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth - @echo ${current_dir}/src/frostfs_testlib_frostfs_testlib | sudo tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + @rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + @touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + @echo ${current_dir}/src/frostfs_testlib_frostfs_testlib | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth -create: - @echo Create virtual environment for +create: ${VENV_DIR} + +${VENV_DIR}: + @echo Create virtual environment ${VENV_DIR} virtualenv --python=python${PYTHON_VERSION} --prompt=frostfs-testlib ${VENV_DIR} requirements: @echo Isntalling pip requirements - . ${VENV_DIR}/bin/activate && pip install -Ur requirements.txt \ No newline at end of file + . ${VENV_DIR}/bin/activate && pip install -Ur requirements.txt + + +#### VALIDATION SECTION #### +lint: create requirements + ${FROM_VENV} pylint --disable R,C,W ./src + +unit_test: + @echo Starting unit tests + ${FROM_VENV} python -m pytest tests + +.PHONY: lint_dependent $(DIRECTORIES) +lint_dependent: $(DIRECTORIES) + +$(DIRECTORIES): + @echo checking dependent repo $@ + $(MAKE) validation -C $@ + +validation: lint unit_test lint_dependent \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 9140ee0..8fca533 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -64,3 +64,8 @@ push = false [tool.bumpver.file_patterns] "pyproject.toml" = ['current_version = "{version}"', 'version = "{version}"'] "src/frostfs_testlib/__init__.py" = ["{version}"] + +[tool.pytest.ini_options] +filterwarnings = [ + "ignore:Blowfish has been deprecated:cryptography.utils.CryptographyDeprecationWarning", +] \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 5b47640..1fdf844 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,6 +17,7 @@ black==22.8.0 bumpver==2022.1118 isort==5.12.0 pre-commit==2.20.0 +pylint==2.17.4 # Packaging dependencies build==0.8.0 diff --git a/src/frostfs_testlib/analytics/test_collector.py b/src/frostfs_testlib/analytics/test_collector.py index 0f5398e..56ee606 100644 --- a/src/frostfs_testlib/analytics/test_collector.py +++ b/src/frostfs_testlib/analytics/test_collector.py @@ -6,6 +6,7 @@ from docstring_parser.google import DEFAULT_SECTIONS, Section, SectionType DEFAULT_SECTIONS.append(Section("Steps", "steps", SectionType.MULTIPLE)) + class TestCase: """ Test case object implementation for use in collector and exporters @@ -106,7 +107,9 @@ class TestCaseCollector: # Read test_case suite and section name from test class if possible and get test function from class if test.cls: suite_name = test.cls.__dict__.get("__test_case_suite_name__", suite_name) - suite_section_name = test.cls.__dict__.get("__test_case_suite_section__", suite_section_name) + suite_section_name = test.cls.__dict__.get( + "__test_case_suite_section__", suite_section_name + ) test_function = test.cls.__dict__[test.originalname] else: # If no test class, read test function from module @@ -117,7 +120,9 @@ class TestCaseCollector: test_case_title = test_function.__dict__.get("__test_case_title__", None) test_case_priority = 
test_function.__dict__.get("__test_case_priority__", None) suite_name = test_function.__dict__.get("__test_case_suite_name__", suite_name) - suite_section_name = test_function.__dict__.get("__test_case_suite_section__", suite_section_name) + suite_section_name = test_function.__dict__.get( + "__test_case_suite_section__", suite_section_name + ) # Parce test_steps if they define in __doc__ doc_string = parse(test_function.__doc__, style=DocstringStyle.GOOGLE) @@ -125,7 +130,9 @@ class TestCaseCollector: if doc_string.short_description: test_case_description = doc_string.short_description if doc_string.long_description: - test_case_description = f"{doc_string.short_description}\r\n{doc_string.long_description}" + test_case_description = ( + f"{doc_string.short_description}\r\n{doc_string.long_description}" + ) if doc_string.meta: for meta in doc_string.meta: @@ -140,25 +147,27 @@ class TestCaseCollector: test_case_params = test_case_call_spec.id # Format title with params if test_case_title: - test_case_title = self.__format_string_with_params__(test_case_title,test_case_call_spec.params) + test_case_title = self.__format_string_with_params__( + test_case_title, test_case_call_spec.params + ) # Format steps with params if test_case_steps: for key, value in test_case_steps.items(): - value = self.__format_string_with_params__(value,test_case_call_spec.params) + value = self.__format_string_with_params__(value, test_case_call_spec.params) test_case_steps[key] = value # If there is set basic test case attributes create TestCase and return if test_case_id and test_case_title and suite_name and suite_name: test_case = TestCase( - id=test_case_id, - title=test_case_title, - description=test_case_description, - priority=test_case_priority, - steps=test_case_steps, - params=test_case_params, - suite_name=suite_name, - suite_section_name=suite_section_name, - ) + uuid_id=test_case_id, + title=test_case_title, + description=test_case_description, + priority=test_case_priority, + steps=test_case_steps, + params=test_case_params, + suite_name=suite_name, + suite_section_name=suite_section_name, + ) return test_case # Return None if there is no enough information for return test case return None @@ -187,4 +196,4 @@ class TestCaseCollector: test_case = self.__get_test_case_from_pytest_test__(test) if test_case: test_cases.append(test_case) - return test_cases \ No newline at end of file + return test_cases diff --git a/src/frostfs_testlib/analytics/test_exporter.py b/src/frostfs_testlib/analytics/test_exporter.py index 263995c..5a569c6 100644 --- a/src/frostfs_testlib/analytics/test_exporter.py +++ b/src/frostfs_testlib/analytics/test_exporter.py @@ -67,6 +67,6 @@ class TestExporter(ABC): steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()] if test_case_in_tms: - self.update_test_case(test_case, test_case_in_tms) + self.update_test_case(test_case, test_case_in_tms, test_suite, test_section) else: - self.create_test_case(test_case) + self.create_test_case(test_case, test_suite, test_section) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 94ee2ff..3addd92 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -135,13 +135,19 @@ class DockerHost(Host): timeout=service_attributes.start_timeout, ) - def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None: + def wait_for_service_to_be_in_state( + self, 
systemd_service_name: str, expected_state: str, timeout: int + ) -> None: raise NotImplementedError("Not implemented for docker") - def get_data_directory(self, service_name: str) -> str: service_attributes = self._get_service_attributes(service_name) - return service_attributes.data_directory_path + + client = self._get_docker_client() + volume_info = client.inspect_volume(service_attributes.volume_name) + volume_path = volume_info["Mountpoint"] + + return volume_path def delete_metabase(self, service_name: str) -> None: raise NotImplementedError("Not implemented for docker") @@ -159,11 +165,7 @@ class DockerHost(Host): raise NotImplementedError("Not implemented for docker") def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: - service_attributes = self._get_service_attributes(service_name) - - client = self._get_docker_client() - volume_info = client.inspect_volume(service_attributes.volume_name) - volume_path = volume_info["Mountpoint"] + volume_path = self.get_data_directory(service_name) shell = self.get_shell() meta_clean_cmd = f"rm -rf {volume_path}/meta*/*" diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 7ed1a27..d869714 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -68,9 +68,7 @@ def _cmd_run(cmd: str, timeout: int = 90) -> str: end_time = datetime.now() _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time) logger.info( - f"Command: {cmd}\n" - f"Error:\nreturn code: {return_code}\n" - f"Output: {exc.output.decode('utf-8') if type(exc.output) is bytes else exc.output}" + f"Command: {cmd}\n" f"Error:\nreturn code: {return_code}\n" f"Output: {cmd_output}" ) raise diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..ea6d681 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,5 @@ +import os +import sys + +app_dir = os.path.join(os.getcwd(), "src") +sys.path.insert(0, app_dir) diff --git a/tests/helpers.py b/tests/helpers.py index 8391002..b7776fd 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -14,11 +14,7 @@ def format_error_details(error: Exception) -> str: Returns: String containing exception details. """ - detail_lines = traceback.format_exception( - etype=type(error), - value=error, - tb=error.__traceback__, - ) + detail_lines = traceback.format_exception(error) return "".join(detail_lines) From 15862e5901d431dc62d9f5f26fb65aca3e5e4df8 Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Thu, 20 Jul 2023 03:41:21 +0300 Subject: [PATCH 023/274] Add optional parameter "copies_number" in "frostfs-cli object put" Signed-off-by: Vladimir Avdeev --- src/frostfs_testlib/cli/frostfs_cli/object.py | 2 ++ src/frostfs_testlib/steps/cli/object.py | 3 +++ 2 files changed, 5 insertions(+) diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 1c1d0ac..8915914 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -224,6 +224,7 @@ class FrostfsCliObject(CliCommand): address: Optional[str] = None, attributes: Optional[dict] = None, bearer: Optional[str] = None, + copies_number: Optional[int] = None, disable_filename: bool = False, disable_timestamp: bool = False, expire_at: Optional[int] = None, @@ -241,6 +242,7 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. attributes: User attributes in form of Key1=Value1,Key2=Value2. 
bearer: File with signed JSON or binary encoded bearer token.
+            copies_number: Number of copies of the object to store within the RPC call.
             cid: Container ID.
             disable_filename: Do not set well-known filename attribute.
             disable_timestamp: Do not set well-known timestamp attribute.
diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py
index 8be7982..d575522 100644
--- a/src/frostfs_testlib/steps/cli/object.py
+++ b/src/frostfs_testlib/steps/cli/object.py
@@ -239,6 +239,7 @@ def put_object(
     shell: Shell,
     endpoint: str,
     bearer: Optional[str] = None,
+    copies_number: Optional[int] = None,
     attributes: Optional[dict] = None,
     xhdr: Optional[dict] = None,
     wallet_config: Optional[str] = None,
@@ -256,6 +257,7 @@ def put_object(
         cid: ID of Container where we get the Object from
         shell: executor for cli command
         bearer: path to Bearer Token file, appends to `--bearer` key
+        copies_number: Number of copies of the object to store within the RPC call
         attributes: User attributes in form of Key1=Value1,Key2=Value2
         endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
         wallet_config: path to the wallet config
@@ -276,6 +278,7 @@ def put_object(
         cid=cid,
         attributes=attributes,
         bearer=bearer,
+        copies_number=copies_number,
         expire_at=expire_at,
         no_progress=no_progress,
         xhdr=xhdr,
From 8dcfae5cb2c4bea03c8ae16293feee5d97c3d6de Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Thu, 20 Jul 2023 12:45:19 +0300
Subject: [PATCH 024/274] Fix empty and zero values parsing

Signed-off-by: Andrey Berezin
---
 src/frostfs_testlib/load/load_config.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py
index 357a129..e73eea7 100644
--- a/src/frostfs_testlib/load/load_config.py
+++ b/src/frostfs_testlib/load/load_config.py
@@ -245,7 +245,7 @@ class LoadParams:
             for meta_field in self._get_meta_fields(self)
             if self.scenario in meta_field.metadata["applicable_scenarios"]
             and meta_field.metadata["env_variable"]
-            and meta_field.value
+            and meta_field.value is not None
         }
 
         return env_vars
@@ -256,7 +256,7 @@ class LoadParams:
             for meta_field in self._get_meta_fields(self)
             if self.scenario in meta_field.metadata["applicable_scenarios"]
             and meta_field.metadata["preset_argument"]
-            and meta_field.value
+            and meta_field.value is not None
             and self._get_preset_argument(meta_field)
         ]
 
From 675183cd9a350b186f9b4a9b5448d1634142e240 Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Fri, 21 Jul 2023 11:46:01 +0300
Subject: [PATCH 025/274] Fix empty and zero values parsing part 2

Signed-off-by: Andrey Berezin
---
 src/frostfs_testlib/load/load_config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py
index e73eea7..73addf7 100644
--- a/src/frostfs_testlib/load/load_config.py
+++ b/src/frostfs_testlib/load/load_config.py
@@ -277,7 +277,7 @@ class LoadParams:
         fields_with_data = [
             MetaField(field.metadata, getattr(instance, field.name))
             for field in data_fields
-            if field.metadata and getattr(instance, field.name)
+            if field.metadata and getattr(instance, field.name) is not None
         ]
 
         for field in data_fields:
From 49ccd47e814ead80ce9b642506f3e467581e8a4e Mon Sep 17 00:00:00 2001
From: Dmitriy Zayakin
Date: Fri, 21 Jul 2023 15:28:10 +0300
Subject: [PATCH 026/274] Add copies_number argument
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/cli/object.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index d575522..9a63604 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -183,6 +183,7 @@ def put_object_to_random_node( shell: Shell, cluster: Cluster, bearer: Optional[str] = None, + copies_number: Optional[int] = None, attributes: Optional[dict] = None, xhdr: Optional[dict] = None, wallet_config: Optional[str] = None, @@ -201,6 +202,7 @@ def put_object_to_random_node( shell: executor for cli command cluster: cluster under test bearer: path to Bearer Token file, appends to `--bearer` key + copies_number: Number of copies of the object to store within the RPC call attributes: User attributes in form of Key1=Value1,Key2=Value2 cluster: cluster under test wallet_config: path to the wallet config @@ -221,6 +223,7 @@ def put_object_to_random_node( shell, endpoint, bearer, + copies_number, attributes, xhdr, wallet_config, From 9c792c091e657acfefd99e598fc1420f9fbfc73d Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 20 Jul 2023 21:05:49 +0300 Subject: [PATCH 027/274] Add error_threshold parameter, add error check after load Signed-off-by: Andrey Berezin --- Makefile | 11 ++- src/frostfs_testlib/load/load_config.py | 5 + src/frostfs_testlib/load/load_report.py | 1 + src/frostfs_testlib/load/load_verifiers.py | 94 +++++++++++++------ .../controllers/background_load_controller.py | 54 ++++++----- 5 files changed, 105 insertions(+), 60 deletions(-) diff --git a/Makefile b/Makefile index 9dbd86c..365e2fc 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,7 @@ SHELL := /bin/bash PYTHON_VERSION := 3.10 -VENV_DIR := venv.frostfs-testlib +VENV_NAME := frostfs-testlib +VENV_DIR := venv.${VENV_NAME} current_dir := $(shell pwd) DIRECTORIES := $(sort $(dir $(wildcard ../frostfs-testlib-plugin-*/ ../*-testcases/))) @@ -15,16 +16,16 @@ precommit: paths: @echo Append paths for project - @echo Virtual environment: ${VENV_DIR} + @echo Virtual environment: ${current_dir}/${VENV_DIR} @rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth @touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth - @echo ${current_dir}/src/frostfs_testlib_frostfs_testlib | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + @echo ${current_dir}/src/frostfs_testlib | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth create: ${VENV_DIR} ${VENV_DIR}: - @echo Create virtual environment ${VENV_DIR} - virtualenv --python=python${PYTHON_VERSION} --prompt=frostfs-testlib ${VENV_DIR} + @echo Create virtual environment ${current_dir}/${VENV_DIR} + virtualenv --python=python${PYTHON_VERSION} --prompt=${VENV_NAME} ${VENV_DIR} requirements: @echo Isntalling pip requirements diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 73addf7..9a7e49c 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -139,6 +139,11 @@ class LoadParams: verify: Optional[bool] = None # Just id for load so distinct it between runs. Filled automatically. 
load_id: Optional[str] = None + # Acceptable number of load errors in % + # 100 means 100% errors allowed + # 1.5 means 1.5% errors allowed + # 0 means no errors allowed + error_threshold: Optional[float] = None # Working directory working_dir: Optional[str] = None # Preset for the k6 run diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index dcd81b4..fa71069 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -154,6 +154,7 @@ class LoadReport: {per_node_errors_html} {self._row("Total", f"{total_errors} ({total_errors/total_operations*100.0:.2f}%)")} + {self._row("Threshold", f"{self.load_params.error_threshold:.2f}%")}
Errors


""" diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py index 1ff63ae..f2a3e7e 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -2,7 +2,9 @@ import logging from frostfs_testlib.load.load_config import LoadParams, LoadScenario from frostfs_testlib.load.load_metrics import get_metrics_object +from frostfs_testlib.reporter import get_reporter +reporter = get_reporter() logger = logging.getLogger("NeoLogger") @@ -10,54 +12,88 @@ class LoadVerifier: def __init__(self, load_params: LoadParams) -> None: self.load_params = load_params - def verify_summaries(self, load_summary, verification_summary) -> None: - exceptions = [] + def verify_load_results(self, load_summaries: dict[str, dict]): + write_operations = 0 + write_errors = 0 - if not verification_summary or not load_summary: - logger.info("Can't check load results due to missing summary") + read_operations = 0 + read_errors = 0 - load_metrics = get_metrics_object(self.load_params.scenario, load_summary) + delete_operations = 0 + delete_errors = 0 writers = self.load_params.writers or self.load_params.preallocated_writers or 0 readers = self.load_params.readers or self.load_params.preallocated_readers or 0 deleters = self.load_params.deleters or self.load_params.preallocated_deleters or 0 - objects_count = load_metrics.write_success_iterations - fails_count = load_metrics.write_failed_iterations + for load_summary in load_summaries.values(): + metrics = get_metrics_object(self.load_params.scenario, load_summary) - if writers > 0: - if objects_count < 1: - exceptions.append("Total put objects should be greater than 0") - if fails_count > 0: - exceptions.append(f"There were {fails_count} failed write operations") + if writers: + write_operations += metrics.write_total_iterations + write_errors += metrics.write_failed_iterations - if readers > 0: - read_count = load_metrics.read_success_iterations - read_fails_count = load_metrics.read_failed_iterations - if read_count < 1: - exceptions.append("Total read operations should be greater than 0") - if read_fails_count > 0: - exceptions.append(f"There were {read_fails_count} failed read operations") + if readers: + read_operations += metrics.read_total_iterations + read_errors += metrics.read_failed_iterations + + if deleters: + delete_operations += metrics.delete_total_iterations + delete_errors += metrics.delete_failed_iterations + + exceptions = [] + if writers and not write_operations: + exceptions.append(f"No any write operation was performed") + if readers and not read_operations: + exceptions.append(f"No any read operation was performed") + if deleters and not delete_operations: + exceptions.append(f"No any delete operation was performed") + + if writers and write_errors / write_operations * 100 > self.load_params.error_threshold: + exceptions.append( + f"Write error rate is greater than threshold: {write_errors / write_operations * 100} > {self.load_params.error_threshold}" + ) + if readers and read_errors / read_operations * 100 > self.load_params.error_threshold: + exceptions.append( + f"Read error rate is greater than threshold: {read_errors / read_operations * 100} > {self.load_params.error_threshold}" + ) + if deleters and delete_errors / delete_operations * 100 > self.load_params.error_threshold: + exceptions.append( + f"Delete error rate is greater than threshold: {delete_errors / delete_operations * 100} > {self.load_params.error_threshold}" + ) + + assert not exceptions, 
"\n".join(exceptions) + + def check_verify_results(self, load_summaries, verification_summaries) -> None: + for node_or_endpoint in load_summaries: + with reporter.step(f"Check verify scenario results for {node_or_endpoint}"): + self._check_verify_result( + load_summaries[node_or_endpoint], verification_summaries[node_or_endpoint] + ) + + def _check_verify_result(self, load_summary, verification_summary) -> None: + exceptions = [] + + load_metrics = get_metrics_object(self.load_params.scenario, load_summary) + + writers = self.load_params.writers or self.load_params.preallocated_writers or 0 + deleters = self.load_params.deleters or self.load_params.preallocated_deleters or 0 + + delete_success = 0 if deleters > 0: - delete_count = load_metrics.delete_success_iterations - delete_fails_count = load_metrics.delete_failed_iterations - if delete_count < 1: - exceptions.append("Total delete operations should be greater than 0") - if delete_fails_count > 0: - exceptions.append(f"There were {delete_fails_count} failed delete operations") + delete_success = load_metrics.delete_success_iterations if verification_summary: verify_metrics = get_metrics_object(LoadScenario.VERIFY, verification_summary) verified_objects = verify_metrics.read_success_iterations invalid_objects = verify_metrics.read_failed_iterations + total_left_objects = load_metrics.write_success_iterations - delete_success - if invalid_objects > 0: - exceptions.append(f"There were {invalid_objects} verification fails") # Due to interruptions we may see total verified objects to be less than written on writers count - if abs(objects_count - verified_objects) > writers: + if abs(total_left_objects - verified_objects) > writers: exceptions.append( - f"Verified objects mismatch. Total: {objects_count}, Verified: {verified_objects}. Writers: {writers}." + f"Verified objects mismatch. Total: {total_left_objects}, Verified: {verified_objects}. Writers: {writers}." 
) assert not exceptions, "\n".join(exceptions) diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index 58a7a6f..91cb1af 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -9,6 +9,7 @@ from frostfs_testlib.load.load_config import ( LoadScenario, LoadType, ) +from frostfs_testlib.load.load_metrics import get_metrics_object from frostfs_testlib.load.load_report import LoadReport from frostfs_testlib.load.load_verifiers import LoadVerifier from frostfs_testlib.reporter import get_reporter @@ -151,54 +152,55 @@ class BackgroundLoadController: load_report.add_summaries(self.load_summaries) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Verify results of load") + @reporter.step_deco("Run post-load verification") def verify(self): try: + self._verify_load_results() if self.load_params.verify: - self.verification_params = LoadParams( - verify_clients=self.load_params.verify_clients, - scenario=LoadScenario.VERIFY, - registry_file=self.load_params.registry_file, - verify_time=self.load_params.verify_time, - load_type=self.load_params.load_type, - load_id=self.load_params.load_id, - working_dir=self.load_params.working_dir, - endpoint_selection_strategy=self.load_params.endpoint_selection_strategy, - k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy, - setup_timeout="1s", - ) self._run_verify_scenario() - verification_summaries = self._get_results() - self.verify_summaries(self.load_summaries, verification_summaries) finally: self._reset_for_consequent_load() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Verify summaries from k6") - def verify_summaries(self, load_summaries: dict, verification_summaries: dict): + @reporter.step_deco("Verify load results") + def _verify_load_results(self): verifier = LoadVerifier(self.load_params) - for node_or_endpoint in load_summaries: - with reporter.step(f"Verify load summaries for {node_or_endpoint}"): - verifier.verify_summaries( - load_summaries[node_or_endpoint], verification_summaries[node_or_endpoint] - ) + verifier.verify_load_results(self.load_summaries) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) def wait_until_finish(self): self.runner.wait_until_finish() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Run verify scenario") + @reporter.step_deco("Verify loaded objects") def _run_verify_scenario(self): + self.verification_params = LoadParams( + verify_clients=self.load_params.verify_clients, + scenario=LoadScenario.VERIFY, + registry_file=self.load_params.registry_file, + verify_time=self.load_params.verify_time, + load_type=self.load_params.load_type, + load_id=self.load_params.load_id, + working_dir=self.load_params.working_dir, + endpoint_selection_strategy=self.load_params.endpoint_selection_strategy, + k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy, + setup_timeout="1s", + ) + if self.verification_params.verify_time is None: raise RuntimeError("verify_time should not be none") self.runner.init_k6_instances(self.verification_params, self.endpoints, self.k6_dir) - with reporter.step("Run verify load data"): + with reporter.step("Run verify scenario"): self.runner.start() self.runner.wait_until_finish() + with reporter.step("Check verify results"): + 
verification_summaries = self._get_results()
+            verifier = LoadVerifier(self.load_params)
+            verifier.check_verify_results(self.load_summaries, verification_summaries)
+
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step_deco("Get load results")
     def _get_results(self) -> dict:
-        return self.runner.get_results()
+        with reporter.step(f"Get {self.load_params.scenario.value} scenario results"):
+            return self.runner.get_results()
From 38742badf2e6e565345cefa1fb48442046e58222 Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Fri, 21 Jul 2023 19:12:11 +0300
Subject: [PATCH 028/274] Add unit tests for load_config.py

Signed-off-by: Andrey Berezin
---
 src/frostfs_testlib/utils/__init__.py | 2 +
 tests/test_load_config.py | 541 ++++++++++++++++++++++++++
 2 files changed, 543 insertions(+)
 create mode 100644 tests/test_load_config.py

diff --git a/src/frostfs_testlib/utils/__init__.py b/src/frostfs_testlib/utils/__init__.py
index 01cf462..0ac903a 100644
--- a/src/frostfs_testlib/utils/__init__.py
+++ b/src/frostfs_testlib/utils/__init__.py
@@ -3,4 +3,6 @@ import frostfs_testlib.utils.datetime_utils
 import frostfs_testlib.utils.json_utils
 import frostfs_testlib.utils.string_utils
 import frostfs_testlib.utils.wallet_utils
+
+# TODO: Circular dependency FileKeeper -> NodeBase -> Utils -> FileKeeper -> NodeBase
 from frostfs_testlib.utils.file_keeper import FileKeeper
diff --git a/tests/test_load_config.py b/tests/test_load_config.py
new file mode 100644
index 0000000..a9b6de1
--- /dev/null
+++ b/tests/test_load_config.py
@@ -0,0 +1,541 @@
+from dataclasses import Field, dataclass, fields, is_dataclass
+from typing import Any, get_args
+
+import pytest
+
+from frostfs_testlib.load.load_config import LoadParams, LoadScenario, LoadType, Preset
+
+
+@dataclass
+class MetaTestField:
+    field: Field
+    field_type: type
+    instance: Any
+
+
+class TestLoadConfig:
+    @pytest.fixture
+    def set_empty(self, request: pytest.FixtureRequest):
+        # Workaround for verify
+        if "param" in request.__dict__ and request.param:
+            return request.param
+
+        return False
+
+    @pytest.fixture
+    def load_type(self, request: pytest.FixtureRequest):
+        # Workaround for verify
+        if "param" in request.__dict__ and request.param:
+            return request.param
+
+        return None
+
+    @pytest.fixture
+    def load_params(self, load_type: LoadType, set_empty: bool, request: pytest.FixtureRequest):
+        load_scenario = request.param
+        return self._get_filled_load_params(load_type, load_scenario, set_empty)
+
+    def test_load_params_only_load_type_required(self):
+        LoadParams(load_type=LoadType.S3)
+
+    def test_load_params_initially_have_all_values_none(self):
+        load_params = LoadParams(load_type=LoadType.S3)
+        self._check_all_values_none(load_params, ["load_type"])
+
+    def test_preset_initially_have_all_values_none(self):
+        preset = Preset()
+        self._check_all_values_none(preset)
+
+    def test_load_set_id_changes_fields(self):
+        load_params = LoadParams(load_type=LoadType.S3)
+        load_params.preset = Preset()
+        load_params.working_dir = "/tmp"
+        load_params.set_id("test_id")
+
+        assert load_params.registry_file == "/tmp/test_id_registry.bolt"
+        assert load_params.preset.pregen_json == "/tmp/test_id_prepare.json"
+        assert load_params.load_id == "test_id"
+
+        # No other values should be changed
+        self._check_all_values_none(
+            load_params, ["load_type", "working_dir", "load_id", "registry_file", "preset"]
+        )
+        self._check_all_values_none(load_params.preset, ["pregen_json"])
+
+    @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], 
indirect=True) + def test_argument_parsing_for_grpc_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '11'", + "--preload_obj '13'", + "--out 'pregen_json'", + "--workers '7'", + "--containers '16'", + "--policy 'container_placement_policy'", + ] + expected_env_vars = { + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "REGISTRY_FILE": "registry_file", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "WRITERS": 7, + "READERS": 7, + "DELETERS": 8, + "PREGEN_JSON": "pregen_json", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params", [LoadScenario.gRPC_CAR], indirect=True) + def test_argument_parsing_for_grpc_car_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '11'", + "--preload_obj '13'", + "--out 'pregen_json'", + "--workers '7'", + "--containers '16'", + "--policy 'container_placement_policy'", + ] + expected_env_vars = { + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "REGISTRY_FILE": "registry_file", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "MAX_WRITERS": 11, + "MAX_READERS": 11, + "MAX_DELETERS": 12, + "PRE_ALLOC_DELETERS": 21, + "PRE_ALLOC_READERS": 20, + "PRE_ALLOC_WRITERS": 20, + "PREGEN_JSON": "pregen_json", + "TIME_UNIT": "time_unit", + "WRITE_RATE": 10, + "READ_RATE": 9, + "DELETE_RATE": 11, + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params", [LoadScenario.S3], indirect=True) + def test_argument_parsing_for_s3_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '11'", + "--preload_obj '13'", + "--no-verify-ssl", + "--out 'pregen_json'", + "--workers '7'", + "--buckets '13'", + "--location 's3_location'", + ] + expected_env_vars = { + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "REGISTRY_FILE": "registry_file", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "WRITERS": 7, + "READERS": 7, + "DELETERS": 8, + "NO_VERIFY_SSL": True, + "PREGEN_JSON": "pregen_json", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) + def test_argument_parsing_for_s3_car_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '11'", + "--preload_obj '13'", + "--no-verify-ssl", + "--out 'pregen_json'", + "--workers '7'", + "--buckets '13'", + "--location 's3_location'", + ] + expected_env_vars = { + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "REGISTRY_FILE": "registry_file", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "NO_VERIFY_SSL": True, + "MAX_WRITERS": 11, + "MAX_READERS": 11, + "MAX_DELETERS": 12, + "PRE_ALLOC_DELETERS": 21, + "PRE_ALLOC_READERS": 20, + "PRE_ALLOC_WRITERS": 20, + "PREGEN_JSON": "pregen_json", + "TIME_UNIT": "time_unit", + "WRITE_RATE": 10, + "READ_RATE": 9, + "DELETE_RATE": 11, + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params", [LoadScenario.HTTP], indirect=True) + def test_argument_parsing_for_http_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--no-verify-ssl", + "--size 
'11'", + "--preload_obj '13'", + "--out 'pregen_json'", + "--workers '7'", + "--containers '16'", + "--policy 'container_placement_policy'", + ] + expected_env_vars = { + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "NO_VERIFY_SSL": True, + "REGISTRY_FILE": "registry_file", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "WRITERS": 7, + "READERS": 7, + "DELETERS": 8, + "PREGEN_JSON": "pregen_json", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params", [LoadScenario.LOCAL], indirect=True) + def test_argument_parsing_for_local_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '11'", + "--preload_obj '13'", + "--out 'pregen_json'", + "--workers '7'", + "--containers '16'", + "--policy 'container_placement_policy'", + ] + expected_env_vars = { + "CONFIG_FILE": "config_file", + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "REGISTRY_FILE": "registry_file", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "WRITERS": 7, + "READERS": 7, + "DELETERS": 8, + "PREGEN_JSON": "pregen_json", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize( + "load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True + ) + def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): + expected_env_vars = { + "CLIENTS": 14, + "REGISTRY_FILE": "registry_file", + "K6_SETUP_TIMEOUT": "setup_timeout", + "NO_VERIFY_SSL": True, + "TIME_LIMIT": 11, + } + + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize( + "load_params, load_type", [(LoadScenario.VERIFY, LoadType.gRPC)], indirect=True + ) + def test_argument_parsing_for_grpc_verify_scenario(self, load_params: LoadParams): + expected_env_vars = { + "CLIENTS": 14, + "REGISTRY_FILE": "registry_file", + "K6_SETUP_TIMEOUT": "setup_timeout", + "NO_VERIFY_SSL": True, + "TIME_LIMIT": 11, + } + + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.gRPC, True)], indirect=True) + def test_empty_argument_parsing_for_grpc_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--out ''", + "--workers '0'", + "--containers '0'", + "--policy ''", + ] + expected_env_vars = { + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "REGISTRY_FILE": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "WRITERS": 0, + "READERS": 0, + "DELETERS": 0, + "PREGEN_JSON": "", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize( + "load_params, set_empty", [(LoadScenario.gRPC_CAR, True)], indirect=True + ) + def test_empty_argument_parsing_for_grpc_car_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--out ''", + "--workers '0'", + "--containers '0'", + "--policy ''", + ] + expected_env_vars = { + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "REGISTRY_FILE": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "MAX_WRITERS": 0, + "MAX_READERS": 0, + "MAX_DELETERS": 0, + "PRE_ALLOC_DELETERS": 0, + "PRE_ALLOC_READERS": 0, + "PRE_ALLOC_WRITERS": 0, + "PREGEN_JSON": "", + "TIME_UNIT": "", + "WRITE_RATE": 0, + 
"READ_RATE": 0, + "DELETE_RATE": 0, + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.S3, True)], indirect=True) + def test_empty_argument_parsing_for_s3_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--out ''", + "--workers '0'", + "--buckets '0'", + "--location ''", + ] + expected_env_vars = { + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "REGISTRY_FILE": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "WRITERS": 0, + "READERS": 0, + "DELETERS": 0, + "NO_VERIFY_SSL": False, + "PREGEN_JSON": "", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.S3_CAR, True)], indirect=True) + def test_empty_argument_parsing_for_s3_car_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--out ''", + "--workers '0'", + "--buckets '0'", + "--location ''", + ] + expected_env_vars = { + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "REGISTRY_FILE": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "NO_VERIFY_SSL": False, + "MAX_WRITERS": 0, + "MAX_READERS": 0, + "MAX_DELETERS": 0, + "PRE_ALLOC_DELETERS": 0, + "PRE_ALLOC_READERS": 0, + "PRE_ALLOC_WRITERS": 0, + "PREGEN_JSON": "", + "TIME_UNIT": "", + "WRITE_RATE": 0, + "READ_RATE": 0, + "DELETE_RATE": 0, + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.HTTP, True)], indirect=True) + def test_empty_argument_parsing_for_http_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--out ''", + "--workers '0'", + "--containers '0'", + "--policy ''", + ] + expected_env_vars = { + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "NO_VERIFY_SSL": False, + "REGISTRY_FILE": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "WRITERS": 0, + "READERS": 0, + "DELETERS": 0, + "PREGEN_JSON": "", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.LOCAL, True)], indirect=True) + def test_empty_argument_parsing_for_local_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--out ''", + "--workers '0'", + "--containers '0'", + "--policy ''", + ] + expected_env_vars = { + "CONFIG_FILE": "", + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "REGISTRY_FILE": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "WRITERS": 0, + "READERS": 0, + "DELETERS": 0, + "PREGEN_JSON": "", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize( + "load_params, load_type, set_empty", + [(LoadScenario.VERIFY, LoadType.S3, True)], + indirect=True, + ) + def test_empty_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): + expected_env_vars = { + "CLIENTS": 0, + "REGISTRY_FILE": "", + "K6_SETUP_TIMEOUT": "", + "NO_VERIFY_SSL": False, + "TIME_LIMIT": 0, + } + + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize( + 
"load_params, load_type, set_empty", + [(LoadScenario.VERIFY, LoadType.gRPC, True)], + indirect=True, + ) + def test_argument_parsing_for_grpc_verify_scenario(self, load_params: LoadParams): + expected_env_vars = { + "CLIENTS": 0, + "REGISTRY_FILE": "", + "K6_SETUP_TIMEOUT": "", + "NO_VERIFY_SSL": False, + "TIME_LIMIT": 0, + } + + self._check_env_vars(load_params, expected_env_vars) + + def _check_preset_params(self, load_params: LoadParams, expected_preset_args: list[str]): + preset_parameters = load_params.get_preset_arguments() + assert sorted(preset_parameters) == sorted(expected_preset_args) + + def _check_env_vars(self, load_params: LoadParams, expected_env_vars: dict[str, str]): + env_vars = load_params.get_env_vars() + assert env_vars == expected_env_vars + + def _check_all_values_none(self, dataclass, skip_fields=None): + if skip_fields is None: + skip_fields = [] + + dataclass_fields = [field for field in fields(dataclass) if field.name not in skip_fields] + for field in dataclass_fields: + value = getattr(dataclass, field.name) + assert value is None, f"{field.name} is not None" + + def _check_all_values_not_none(self, dataclass, skip_fields=None): + if skip_fields is None: + skip_fields = [] + + dataclass_fields = [field for field in fields(dataclass) if field.name not in skip_fields] + for field in dataclass_fields: + value = getattr(dataclass, field.name) + assert value is not None, f"{field.name} is not None" + + def _get_filled_load_params( + self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False + ) -> LoadParams: + load_type_map = { + LoadScenario.S3: LoadType.S3, + LoadScenario.S3_CAR: LoadType.S3, + LoadScenario.gRPC: LoadType.gRPC, + LoadScenario.gRPC_CAR: LoadType.gRPC, + LoadScenario.LOCAL: LoadType.gRPC, + LoadScenario.HTTP: LoadType.HTTP, + } + load_type = load_type_map[load_scenario] if not load_type else load_type + + load_params = LoadParams(load_type) + load_params.scenario = load_scenario + load_params.preset = Preset() + + meta_fields = self._get_meta_fields(load_params) + for field in meta_fields: + if ( + getattr(field.instance, field.field.name) is None + and load_params.scenario in field.field.metadata["applicable_scenarios"] + ): + value_to_set_map = { + int: 0 if set_emtpy else len(field.field.name), + str: "" if set_emtpy else field.field.name, + bool: False if set_emtpy else True, + } + value_to_set = value_to_set_map[field.field_type] + setattr(field.instance, field.field.name, value_to_set) + + return load_params + + def _get_actual_field_type(self, field: Field) -> type: + return get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type) + + def _get_meta_fields(self, instance): + data_fields = fields(instance) + fields_with_data = [ + MetaTestField(field, self._get_actual_field_type(field), instance) + for field in data_fields + if field.metadata + ] + + for field in data_fields: + actual_field_type = self._get_actual_field_type(field) + if is_dataclass(actual_field_type) and getattr(instance, field.name): + fields_with_data += self._get_meta_fields(getattr(instance, field.name)) + + return fields_with_data or [] From 2240be09d2091a45316fbe4f9f9325f88315db76 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 24 Jul 2023 19:34:21 +0300 Subject: [PATCH 029/274] Add repr and str for most classes used in parametrize Signed-off-by: Andrey Berezin --- .gitignore | 1 + Makefile | 2 +- pyproject.toml | 3 +- src/frostfs_testlib/load/load_config.py | 94 +++++++++++++------ 
src/frostfs_testlib/load/load_report.py | 18 +--- src/frostfs_testlib/s3/aws_cli_client.py | 2 + src/frostfs_testlib/s3/boto3_client.py | 2 + src/frostfs_testlib/s3/interfaces.py | 6 +- .../storage/dataclasses/node_base.py | 3 +- .../storage/dataclasses/object_size.py | 13 +++ src/frostfs_testlib/testing/readable.py | 27 ++++++ src/frostfs_testlib/utils/converting_utils.py | 13 +++ tests/test_dataclasses.py | 37 ++++++++ tests/test_load_config.py | 14 +++ 14 files changed, 187 insertions(+), 48 deletions(-) create mode 100644 src/frostfs_testlib/storage/dataclasses/object_size.py create mode 100644 src/frostfs_testlib/testing/readable.py create mode 100644 tests/test_dataclasses.py diff --git a/.gitignore b/.gitignore index e2967ea..4691fe4 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ venv.* /dist /build *.egg-info +wallet_config.yml \ No newline at end of file diff --git a/Makefile b/Makefile index 365e2fc..644eab0 100644 --- a/Makefile +++ b/Makefile @@ -19,7 +19,7 @@ paths: @echo Virtual environment: ${current_dir}/${VENV_DIR} @rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth @touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth - @echo ${current_dir}/src/frostfs_testlib | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + @echo ${current_dir}/src | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth create: ${VENV_DIR} diff --git a/pyproject.toml b/pyproject.toml index 8fca533..f85b883 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -68,4 +68,5 @@ push = false [tool.pytest.ini_options] filterwarnings = [ "ignore:Blowfish has been deprecated:cryptography.utils.CryptographyDeprecationWarning", -] \ No newline at end of file +] +testpaths = ["tests"] \ No newline at end of file diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 9a7e49c..4e0b71f 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -4,6 +4,8 @@ from enum import Enum from types import MappingProxyType from typing import Any, Optional, get_args +from frostfs_testlib.utils.converting_utils import calc_unit + class LoadType(Enum): gRPC = "grpc" @@ -45,6 +47,7 @@ s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR] @dataclass class MetaField: + name: str metadata: MappingProxyType value: Any @@ -53,6 +56,7 @@ def metadata_field( applicable_scenarios: list[LoadScenario], preset_param: Optional[str] = None, scenario_variable: Optional[str] = None, + string_repr: Optional[bool] = True, distributed: Optional[bool] = False, ): return field( @@ -61,6 +65,7 @@ def metadata_field( "applicable_scenarios": applicable_scenarios, "preset_argument": preset_param, "env_variable": scenario_variable, + "string_repr": string_repr, "distributed": distributed, }, ) @@ -100,25 +105,27 @@ class K6ProcessAllocationStrategy(Enum): class Preset: # ------ COMMON ------ # Amount of objects which should be created - objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None) + objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None, False) # Preset json. Filled automatically. 
- pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON") + pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False) # Workers count for preset - workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None) + workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False) # ------ GRPC ------ # Amount of containers which should be created - containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None) + containers_count: Optional[int] = metadata_field( + grpc_preset_scenarios, "containers", None, False + ) # Container placement policy for containers for gRPC container_placement_policy: Optional[str] = metadata_field( - grpc_preset_scenarios, "policy", None + grpc_preset_scenarios, "policy", None, False ) # ------ S3 ------ # Amount of buckets which should be created - buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None) + buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None, False) # S3 region (AKA placement policy for S3 buckets) - s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None) + s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None, False) @dataclass @@ -155,88 +162,93 @@ class LoadParams: [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.VERIFY, LoadScenario.HTTP], "no-verify-ssl", "NO_VERIFY_SSL", + False, ) # ------- COMMON SCENARIO PARAMS ------- # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. - load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION") + load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False) # Object size in KB for load and preset. - object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE") + object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False) # Output registry K6 file. Filled automatically. - registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE") + registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False) # Specifies the minimum duration of every single execution (i.e. iteration). # Any iterations that are shorter than this value will cause that VU to # sleep for the remainder of the time until the specified minimum duration is reached. min_iteration_duration: Optional[str] = metadata_field( - all_load_scenarios, None, "K6_MIN_ITERATION_DURATION" + all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False ) # Specifies K6 setupTimeout time. Currently hardcoded in xk6 as 5 seconds for all scenarios # https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout - setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT") + setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False) # ------- CONSTANT VUS SCENARIO PARAMS ------- # Amount of Writers VU. - writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True) + writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True, True) # Amount of Readers VU. - readers: Optional[int] = metadata_field(constant_vus_scenarios, None, "READERS", True) + readers: Optional[int] = metadata_field(constant_vus_scenarios, None, "READERS", True, True) # Amount of Deleters VU. 
-    deleters: Optional[int] = metadata_field(constant_vus_scenarios, None, "DELETERS", True)
+    deleters: Optional[int] = metadata_field(constant_vus_scenarios, None, "DELETERS", True, True)

     # ------- CONSTANT ARRIVAL RATE SCENARIO PARAMS -------
     # Number of iterations to start during each timeUnit period for write.
     write_rate: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "WRITE_RATE", True
+        constant_arrival_rate_scenarios, None, "WRITE_RATE", True, True
     )
     # Number of iterations to start during each timeUnit period for read.
     read_rate: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "READ_RATE", True
+        constant_arrival_rate_scenarios, None, "READ_RATE", True, True
     )
     # Number of iterations to start during each timeUnit period for delete.
     delete_rate: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "DELETE_RATE", True
+        constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True
     )
     # Amount of preAllocatedVUs for write operations.
     preallocated_writers: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True
+        constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True
     )
     # Amount of maxVUs for write operations.
     max_writers: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "MAX_WRITERS", True
+        constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True
     )
     # Amount of preAllocatedVUs for read operations.
     preallocated_readers: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True
+        constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True
     )
     # Amount of maxVUs for read operations.
     max_readers: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "MAX_READERS", True
+        constant_arrival_rate_scenarios, None, "MAX_READERS", False, True
     )
     # Amount of preAllocatedVUs for delete operations.
     preallocated_deleters: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True
+        constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True
     )
     # Amount of maxVUs for delete operations.
     max_deleters: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "MAX_DELETERS", True
+        constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True
     )
     # Period of time to apply the rate value.
-    time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT")
+    time_unit: Optional[str] = metadata_field(
+        constant_arrival_rate_scenarios, None, "TIME_UNIT", False
+    )

     # ------- VERIFY SCENARIO PARAMS -------
     # Maximum verification time for k6 to verify objects. Default is BACKGROUND_LOAD_MAX_VERIFY_TIME (3600).
-    verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT")
+    verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT", False)
     # Amount of Verification VU.
- verify_clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS", True) + verify_clients: Optional[int] = metadata_field( + [LoadScenario.VERIFY], None, "CLIENTS", True, False + ) # ------- LOCAL SCENARIO PARAMS ------- # Config file location (filled automatically) - config_file: Optional[str] = metadata_field([LoadScenario.LOCAL], None, "CONFIG_FILE") + config_file: Optional[str] = metadata_field([LoadScenario.LOCAL], None, "CONFIG_FILE", False) def set_id(self, load_id): self.load_id = load_id @@ -267,6 +279,15 @@ class LoadParams: return command_args + def _get_applicable_fields(self): + applicable_fields = [ + meta_field + for meta_field in self._get_meta_fields(self) + if self.scenario in meta_field.metadata["applicable_scenarios"] and meta_field.value + ] + + return applicable_fields + @staticmethod def _get_preset_argument(meta_field: MetaField) -> str: if isinstance(meta_field.value, bool): @@ -280,7 +301,7 @@ class LoadParams: data_fields = fields(instance) fields_with_data = [ - MetaField(field.metadata, getattr(instance, field.name)) + MetaField(field.name, field.metadata, getattr(instance, field.name)) for field in data_fields if field.metadata and getattr(instance, field.name) is not None ] @@ -293,3 +314,18 @@ class LoadParams: fields_with_data += LoadParams._get_meta_fields(getattr(instance, field.name)) return fields_with_data or [] + + def __str__(self) -> str: + size, unit = calc_unit(self.object_size, 1) + static_params = [f"{self.scenario.value} ({size:.4g} {unit})"] + dynamic_params = [ + f"{meta_field.name}={meta_field.value}" + for meta_field in self._get_applicable_fields() + if meta_field.metadata["string_repr"] + ] + params = ", ".join(static_params + dynamic_params) + + return f"load: {params}" + + def __repr__(self) -> str: + return self.__str__() diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index fa71069..e1056b7 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -1,10 +1,11 @@ from datetime import datetime -from typing import Optional, Tuple +from typing import Optional import yaml from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario from frostfs_testlib.load.load_metrics import get_metrics_object +from frostfs_testlib.utils.converting_utils import calc_unit class LoadReport: @@ -62,17 +63,6 @@ class LoadReport: return html - def _calc_unit(self, value: float, skip_units: int = 0) -> Tuple[float, str]: - units = ["B", "KiB", "MiB", "GiB", "TiB"] - - for unit in units[skip_units:]: - if value < 1024: - return value, unit - - value = value / 1024.0 - - return value, unit - def _seconds_to_formatted_duration(self, seconds: int) -> str: """Converts N number of seconds to formatted output ignoring zeroes. 
Examples: @@ -122,7 +112,7 @@ class LoadReport: ): throughput_html = "" if throughput > 0: - throughput, unit = self._calc_unit(throughput) + throughput, unit = calc_unit(throughput) throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec") per_node_errors_html = "" @@ -137,7 +127,7 @@ class LoadReport: ): per_node_errors_html += self._row(f"At {node_key}", errors) - object_size, object_size_unit = self._calc_unit(self.load_params.object_size, 1) + object_size, object_size_unit = calc_unit(self.load_params.object_size, 1) duration = self._seconds_to_formatted_duration(self.load_params.load_time) model = self._get_model_string() # write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index a9aeb37..2e61679 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -24,6 +24,8 @@ LONG_TIMEOUT = 240 class AwsCliClient(S3ClientWrapper): + __repr_name__: str = "AWS CLI" + # Flags that we use for all S3 commands: disable SSL verification (as we use self-signed # certificate in devenv) and disable automatic pagination in CLI output common_flags = "--no-verify-ssl --no-paginate" diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 6d6fc74..2251efe 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -44,6 +44,8 @@ def report_error(func): class Boto3ClientWrapper(S3ClientWrapper): + __repr_name__: str = "Boto3 client" + @reporter.step_deco("Configure S3 client (boto3)") @report_error def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index 3f31395..166abff 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -1,8 +1,10 @@ -from abc import ABC, abstractmethod +from abc import abstractmethod from datetime import datetime from enum import Enum from typing import Literal, Optional, Union +from frostfs_testlib.testing.readable import HumanReadableABC + def _make_objs_dict(key_names): objs_list = [] @@ -29,7 +31,7 @@ ACL_COPY = [ ] -class S3ClientWrapper(ABC): +class S3ClientWrapper(HumanReadableABC): @abstractmethod def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: pass diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 150b963..9748bc2 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -7,11 +7,12 @@ import yaml from frostfs_testlib.hosting.config import ServiceConfig from frostfs_testlib.hosting.interfaces import Host from frostfs_testlib.storage.constants import ConfigAttributes +from frostfs_testlib.testing.readable import HumanReadableABC from frostfs_testlib.utils import wallet_utils @dataclass -class NodeBase(ABC): +class NodeBase(HumanReadableABC): """ Represents a node of some underlying service """ diff --git a/src/frostfs_testlib/storage/dataclasses/object_size.py b/src/frostfs_testlib/storage/dataclasses/object_size.py new file mode 100644 index 0000000..520bdc3 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/object_size.py @@ -0,0 +1,13 @@ +from dataclasses import dataclass + + +@dataclass +class ObjectSize: + name: 
str + value: int + + def __str__(self) -> str: + return f"{self.name} object size" + + def __repr__(self) -> str: + return self.__str__() diff --git a/src/frostfs_testlib/testing/readable.py b/src/frostfs_testlib/testing/readable.py new file mode 100644 index 0000000..66384b7 --- /dev/null +++ b/src/frostfs_testlib/testing/readable.py @@ -0,0 +1,27 @@ +from abc import ABCMeta + + +class HumanReadableABCMeta(ABCMeta): + def __str__(cls): + if "__repr_name__" in cls.__dict__: + return cls.__dict__["__repr_name__"] + return cls.__name__ + + def __repr__(cls): + if "__repr_name__" in cls.__dict__: + return cls.__dict__["__repr_name__"] + return cls.__name__ + + +class HumanReadableABC(metaclass=HumanReadableABCMeta): + @classmethod + def __str__(cls): + if "__repr_name__" in cls.__dict__: + return cls.__dict__["__repr_name__"] + return type(cls).__name__ + + @classmethod + def __repr__(cls): + if "__repr_name__" in cls.__dict__: + return cls.__dict__["__repr_name__"] + return type(cls).__name__ diff --git a/src/frostfs_testlib/utils/converting_utils.py b/src/frostfs_testlib/utils/converting_utils.py index 24b77ae..273d9b4 100644 --- a/src/frostfs_testlib/utils/converting_utils.py +++ b/src/frostfs_testlib/utils/converting_utils.py @@ -1,10 +1,23 @@ import base64 import binascii import json +from typing import Tuple import base58 +def calc_unit(value: float, skip_units: int = 0) -> Tuple[float, str]: + units = ["B", "KiB", "MiB", "GiB", "TiB"] + + for unit in units[skip_units:]: + if value < 1024: + return value, unit + + value = value / 1024.0 + + return value, unit + + def str_to_ascii_hex(input: str) -> str: b = binascii.hexlify(input.encode()) return str(b)[2:-1] diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py new file mode 100644 index 0000000..11cda7a --- /dev/null +++ b/tests/test_dataclasses.py @@ -0,0 +1,37 @@ +from typing import Any + +import pytest + +from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper +from frostfs_testlib.storage.dataclasses.frostfs_services import ( + HTTPGate, + InnerRing, + MorphChain, + S3Gate, + StorageNode, +) +from frostfs_testlib.storage.dataclasses.object_size import ObjectSize + + +class TestDataclassesStr: + """Here we are testing important classes string representation.""" + + @pytest.mark.parametrize( + "obj, expected", + [ + (Boto3ClientWrapper, "Boto3 client"), + (AwsCliClient, "AWS CLI"), + (ObjectSize("simple", 1), "simple object size"), + (ObjectSize("simple", 10), "simple object size"), + (ObjectSize("complex", 5000), "complex object size"), + (ObjectSize("complex", 5555), "complex object size"), + (StorageNode, "StorageNode"), + (MorphChain, "MorphChain"), + (S3Gate, "S3Gate"), + (HTTPGate, "HTTPGate"), + (InnerRing, "InnerRing"), + ], + ) + def test_classes_string_representation(self, obj: Any, expected: str): + assert f"{obj}" == expected + assert repr(obj) == expected diff --git a/tests/test_load_config.py b/tests/test_load_config.py index a9b6de1..89a10ea 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -46,6 +46,20 @@ class TestLoadConfig: preset = Preset() self._check_all_values_none(preset) + @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) + def test_string_representation_s3_car(self, load_params: LoadParams): + load_params.object_size = 524288 + expected = "load: s3_car (512 MiB), write_rate=10, read_rate=9, delete_rate=11, preallocated_writers=20, preallocated_readers=20, preallocated_deleters=21" + assert f"{load_params}" == expected + assert 
repr(load_params) == expected
+
+    @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True)
+    def test_string_representation_grpc(self, load_params: LoadParams):
+        load_params.object_size = 512
+        expected = "load: grpc (512 KiB), writers=7, readers=7, deleters=8"
+        assert f"{load_params}" == expected
+        assert repr(load_params) == expected
+
     def test_load_set_id_changes_fields(self):
         load_params = LoadParams(load_type=LoadType.S3)
         load_params.preset = Preset()

From b856e820080fba80ce7abf6fb46798b68b66145a Mon Sep 17 00:00:00 2001
From: Yaroslava Lukoyanova
Date: Mon, 24 Jul 2023 10:21:11 +0300
Subject: [PATCH 030/274] Added http hostname as a header to all http calls

---
 src/frostfs_testlib/steps/http/http_gate.py | 39 ++++++++++++-------
 src/frostfs_testlib/storage/cluster.py | 4 ++
 src/frostfs_testlib/storage/constants.py | 2 +
 .../storage/dataclasses/frostfs_services.py | 6 +++
 4 files changed, 38 insertions(+), 13 deletions(-)

diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py
index efc5258..8080689 100644
--- a/src/frostfs_testlib/steps/http/http_gate.py
+++ b/src/frostfs_testlib/steps/http/http_gate.py
@@ -32,6 +32,7 @@ def get_via_http_gate(
     cid: str,
     oid: str,
     endpoint: str,
+    http_hostname: str,
     request_path: Optional[str] = None,
     timeout: Optional[int] = 300,
 ):
@@ -40,6 +41,7 @@ def get_via_http_gate(
     cid: container id to get object from
     oid: object ID
     endpoint: http gate endpoint
+    http_hostname: http host name on the node
     request_path: (optional) http request, if omitted - use default [{endpoint}/get/{cid}/{oid}]
     """
@@ -49,13 +51,14 @@
     else:
         request = f"{endpoint}{request_path}"

-    resp = requests.get(request, stream=True, timeout=timeout, verify=False)
+    resp = requests.get(request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False)

     if not resp.ok:
         raise Exception(
             f"""Failed to get object via HTTP gate:
                 request: {resp.request.path_url},
                 response: {resp.text},
+                headers: {resp.headers},
                 status code: {resp.status_code} {resp.reason}"""
         )

@@ -69,12 +72,13 @@

 @reporter.step_deco("Get via Zip HTTP Gate")
-def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, timeout: Optional[int] = 300):
+def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300):
     """
     This function gets given object from HTTP gate
     cid: container id to get object from
     prefix: common prefix
     endpoint: http gate endpoint
+    http_hostname: http host name on the node
     """
     request = f"{endpoint}/zip/{cid}/{prefix}"
-    resp = requests.get(request, stream=True, timeout=timeout, verify=False)
+    resp = requests.get(request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False)
@@ -84,6 +88,7 @@
             f"""Failed to get object via HTTP gate:
                 request: {resp.request.path_url},
                 response: {resp.text},
+                headers: {resp.headers},
                 status code: {resp.status_code} {resp.reason}"""
         )

@@ -105,6 +110,7 @@ def get_via_http_gate_by_attribute(
     cid: str,
     attribute: dict,
     endpoint: str,
+    http_hostname: str,
     request_path: Optional[str] = None,
     timeout: Optional[int] = 300,
 ):
@@ -113,6 +119,7 @@ def get_via_http_gate_by_attribute(
     cid: CID to get object from
     attribute: attribute {name: attribute} value pair
     endpoint: http gate endpoint
+    http_hostname: http host name on the node
     request_path: (optional) http request path, if omitted - use default [{endpoint}/get_by_attribute/{Key}/{Value}]
     """
     attr_name = list(attribute.keys())[0]
@@ -123,13 +130,14 @@
def get_via_http_gate_by_attribute( else: request = f"{endpoint}{request_path}" - resp = requests.get(request, stream=True, timeout=timeout, verify=False) + resp = requests.get(request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname}) if not resp.ok: raise Exception( f"""Failed to get object via HTTP gate: request: {resp.request.path_url}, response: {resp.text}, + headers: {resp.headers}, status code: {resp.status_code} {resp.reason}""" ) @@ -142,6 +150,7 @@ def get_via_http_gate_by_attribute( return file_path +# TODO: pass http_hostname as a header @reporter.step_deco("Upload via HTTP Gate") def upload_via_http_gate( cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300 @@ -188,6 +197,7 @@ def is_object_large(filepath: str) -> bool: return False +# TODO: pass http_hostname as a header @reporter.step_deco("Upload via HTTP Gate using Curl") def upload_via_http_gate_curl( cid: str, @@ -236,17 +246,18 @@ def upload_via_http_gate_curl( @reporter.step_deco("Get via HTTP Gate using Curl") -def get_via_http_curl(cid: str, oid: str, endpoint: str) -> str: +def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> str: """ This function gets given object from HTTP gate using curl utility. cid: CID to get object from oid: object OID endpoint: http gate endpoint + http_hostname: http host name of the node """ request = f"{endpoint}/get/{cid}/{oid}" file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}") - cmd = f"curl -k {request} > {file_path}" + cmd = f"curl -k -H \"Host: {http_hostname}\" {request} > {file_path}" _cmd_run(cmd) return file_path @@ -260,10 +271,10 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"): @reporter.step_deco("Try to get object and expect error") def try_to_get_object_and_expect_error( - cid: str, oid: str, error_pattern: str, endpoint: str + cid: str, oid: str, error_pattern: str, endpoint: str, http_hostname: str, ) -> None: try: - get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint) + get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) raise AssertionError(f"Expected error on getting object with cid: {cid}") except Exception as err: match = error_pattern.casefold() in str(err).casefold() @@ -272,11 +283,11 @@ def try_to_get_object_and_expect_error( @reporter.step_deco("Verify object can be get using HTTP header attribute") def get_object_by_attr_and_verify_hashes( - oid: str, file_name: str, cid: str, attrs: dict, endpoint: str + oid: str, file_name: str, cid: str, attrs: dict, endpoint: str, http_hostname: str, ) -> None: - got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint) + got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) got_file_path_http_attr = get_via_http_gate_by_attribute( - cid=cid, attribute=attrs, endpoint=endpoint + cid=cid, attribute=attrs, endpoint=endpoint, http_hostname=http_hostname ) assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr) @@ -289,6 +300,7 @@ def verify_object_hash( shell: Shell, nodes: list[StorageNode], endpoint: str, + http_hostname: str, object_getter=None, ) -> None: @@ -314,7 +326,7 @@ def verify_object_hash( shell=shell, endpoint=random_node.get_rpc_endpoint(), ) - got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint) + got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) 
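The thread running through patch 030 above is the explicit `Host` header: the TCP connection still targets the gate's endpoint address, while the header carries the virtual-host name the gate routes on. A minimal sketch of the same request shape; the endpoint, hostname, and identifiers are made up for illustration:

    import requests

    endpoint = "http://192.168.0.10:8080"  # hypothetical HTTP gate address
    http_hostname = "http.frostfs.devenv"  # hypothetical virtual host name
    cid, oid = "someCid", "someOid"        # hypothetical identifiers

    # Same idea as get_via_http_gate: verify=False because the test env uses
    # self-signed certificates; the Host header drives name-based routing.
    resp = requests.get(
        f"{endpoint}/get/{cid}/{oid}",
        headers={"Host": http_hostname},
        stream=True,
        timeout=300,
        verify=False,
    )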
assert_hashes_are_equal(file_name, got_file_path, got_file_path_http) @@ -352,14 +364,15 @@ def try_to_get_object_via_passed_request_and_expect_error( error_pattern: str, endpoint: str, http_request_path: str, + http_hostname: str, attrs: Optional[dict] = None, ) -> None: try: if attrs is None: - get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, request_path=http_request_path) + get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, request_path=http_request_path, http_hostname=http_hostname) else: get_via_http_gate_by_attribute( - cid=cid, attribute=attrs, endpoint=endpoint, request_path=http_request_path + cid=cid, attribute=attrs, endpoint=endpoint, request_path=http_request_path, http_hostname=http_hostname ) raise AssertionError(f"Expected error on getting object with cid: {cid}") except Exception as err: diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 91487c9..0e24ebb 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -130,6 +130,8 @@ class Cluster: default_rpc_endpoint: str default_s3_gate_endpoint: str default_http_gate_endpoint: str + default_http_hostname: str + default_s3_hostname: str def __init__(self, hosting: Hosting) -> None: self._hosting = hosting @@ -138,6 +140,8 @@ class Cluster: self.default_rpc_endpoint = self.services(StorageNode)[0].get_rpc_endpoint() self.default_s3_gate_endpoint = self.services(S3Gate)[0].get_endpoint() self.default_http_gate_endpoint = self.services(HTTPGate)[0].get_endpoint() + self.default_http_hostname = self.services(StorageNode)[0].get_http_hostname() + self.default_s3_hostname = self.services(StorageNode)[0].get_s3_hostname() @property def hosts(self) -> list[Host]: diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 95ea3f2..2f9d8a8 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -11,6 +11,8 @@ class ConfigAttributes: ENDPOINT_INTERNAL = "endpoint_internal0" CONTROL_ENDPOINT = "control_endpoint" UN_LOCODE = "un_locode" + HTTP_HOSTNAME = "http_hostname" + S3_HOSTNAME = "s3_hostname" class _FrostfsServicesNames: diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 23e3335..944837a 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -170,6 +170,12 @@ class StorageNode(NodeBase): def get_data_directory(self) -> str: return self.host.get_data_directory(self.name) + + def get_http_hostname(self) -> str: + return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME) + + def get_s3_hostname(self) -> str: + return self._get_attribute(ConfigAttributes.S3_HOSTNAME) def delete_blobovnicza(self): self.host.delete_blobovnicza(self.name) From 612e0887631d8d4221135d5b0bb97a13baac6bae Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 27 Jul 2023 10:49:41 +0300 Subject: [PATCH 031/274] Fix string representation for load params with empty fields Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_config.py | 10 +++++-- .../controllers/background_load_controller.py | 13 ++++++--- tests/test_load_config.py | 28 +++++++++++++++++-- 3 files changed, 43 insertions(+), 8 deletions(-) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 4e0b71f..ec5d3fd 100644 --- a/src/frostfs_testlib/load/load_config.py +++ 
b/src/frostfs_testlib/load/load_config.py @@ -316,8 +316,14 @@ class LoadParams: return fields_with_data or [] def __str__(self) -> str: - size, unit = calc_unit(self.object_size, 1) - static_params = [f"{self.scenario.value} ({size:.4g} {unit})"] + load_type_str = self.scenario.value if self.scenario else self.load_type.value + # TODO: migrate load_params defaults to testlib + if self.object_size is not None: + size, unit = calc_unit(self.object_size, 1) + static_params = [f"{load_type_str} ({size:.4g} {unit})"] + else: + static_params = [f"{load_type_str}"] + dynamic_params = [ f"{meta_field.name}={meta_field.value}" for meta_field in self._get_applicable_fields() diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index 91cb1af..aa17f4e 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -53,10 +53,6 @@ class BackgroundLoadController: if load_params.endpoint_selection_strategy is None: raise RuntimeError("endpoint_selection_strategy should not be None") - self.endpoints = self._get_endpoints( - load_params.load_type, load_params.endpoint_selection_strategy - ) - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, []) def _get_endpoints( self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy @@ -100,6 +96,9 @@ class BackgroundLoadController: @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step_deco("Prepare load instances") def prepare(self): + self.endpoints = self._get_endpoints( + self.load_params.load_type, self.load_params.endpoint_selection_strategy + ) self.runner.prepare(self.load_params, self.nodes_under_load, self.k6_dir) self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir) @@ -204,3 +203,9 @@ class BackgroundLoadController: def _get_results(self) -> dict: with reporter.step(f"Get {self.load_params.scenario.value} scenario results"): return self.runner.get_results() + + def __str__(self) -> str: + return self.load_params.__str__() + + def __repr__(self) -> str: + return repr(self.load_params) diff --git a/tests/test_load_config.py b/tests/test_load_config.py index 89a10ea..a84a188 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -3,7 +3,17 @@ from typing import Any, get_args import pytest -from frostfs_testlib.load.load_config import LoadParams, LoadScenario, LoadType, Preset +from frostfs_testlib.load.load_config import ( + EndpointSelectionStrategy, + LoadParams, + LoadScenario, + LoadType, + Preset, +) +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController +from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode +from frostfs_testlib.storage.dataclasses.node_base import NodeBase @dataclass @@ -36,7 +46,10 @@ class TestLoadConfig: return self._get_filled_load_params(load_type, load_scenario, set_empty) def test_load_params_only_load_type_required(self): - LoadParams(load_type=LoadType.S3) + load_params = LoadParams(load_type=LoadType.S3) + expected = "load: s3" + assert repr(load_params) == expected + assert f"{load_params}" == expected def test_load_params_initially_have_all_values_none(self): load_params = LoadParams(load_type=LoadType.S3) @@ -60,6 +73,17 @@ class TestLoadConfig: assert f"{load_params}" == expected assert repr(load_params) 
== expected
+
+    @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True)
+    def test_load_controller_string_representation(self, load_params: LoadParams):
+        load_params.endpoint_selection_strategy = EndpointSelectionStrategy.ALL
+        load_params.object_size = 512
+        background_load_controller = BackgroundLoadController(
+            "tmp", load_params, "wallet", None, None
+        )
+        expected = "load: grpc (512 KiB), writers=7, readers=7, deleters=8"
+        assert f"{background_load_controller}" == expected
+        assert repr(background_load_controller) == expected
+
     def test_load_set_id_changes_fields(self):
         load_params = LoadParams(load_type=LoadType.S3)
         load_params.preset = Preset()

From d6e08c477b8fedeef29ebb72e02f03569c0cd531 Mon Sep 17 00:00:00 2001
From: "m.malygina"
Date: Wed, 26 Jul 2023 13:35:35 +0300
Subject: [PATCH 032/274] Fix division by zero when total operations count is zero

---
 src/frostfs_testlib/load/load_report.py | 14 ++++++++------
 src/frostfs_testlib/load/load_verifiers.py | 6 +++---
 2 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py
index e1056b7..a2cecf6 100644
--- a/src/frostfs_testlib/load/load_report.py
+++ b/src/frostfs_testlib/load/load_report.py
@@ -100,7 +100,7 @@ class LoadReport:

         return model_map[self.load_params.scenario]

-    def _get_oprations_sub_section_html(
+    def _get_operations_sub_section_html(
         self,
         operation_type: str,
         total_operations: int,
@@ -132,7 +132,9 @@ class LoadReport:
         model = self._get_model_string()
         # write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s
         short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {vus_str} {model} - {throughput:.2f}{unit}/s {total_rate:.2f}/s"
-
+        errors_percent = 0
+        if total_operations:
+            errors_percent = total_errors/total_operations*100.0
         html = f"""
 <table border="1" cellpadding="5px"><tbody>
 <tr><th colspan="2" bgcolor="gainsboro">{short_summary}</th></tr>
 <tr><th colspan="2" bgcolor="gainsboro">Errors</th></tr>
 {per_node_errors_html}
-{self._row("Total", f"{total_errors} ({total_errors/total_operations*100.0:.2f}%)")}
+{self._row("Total", f"{total_errors} ({errors_percent:.2f}%)")}
 {self._row("Threshold", f"{self.load_params.error_threshold:.2f}%")}
 </tbody></table>
 <br><hr>
""" @@ -228,7 +230,7 @@ class LoadReport: delete_errors[node_key] = metrics.delete_failed_iterations if write_section_required: - html += self._get_oprations_sub_section_html( + html += self._get_operations_sub_section_html( "Write", write_operations, requested_write_rate_str, @@ -239,7 +241,7 @@ class LoadReport: ) if read_section_required: - html += self._get_oprations_sub_section_html( + html += self._get_operations_sub_section_html( "Read", read_operations, requested_read_rate_str, @@ -250,7 +252,7 @@ class LoadReport: ) if delete_section_required: - html += self._get_oprations_sub_section_html( + html += self._get_operations_sub_section_html( "Delete", delete_operations, requested_delete_rate_str, diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py index f2a3e7e..80c3962 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -49,15 +49,15 @@ class LoadVerifier: if deleters and not delete_operations: exceptions.append(f"No any delete operation was performed") - if writers and write_errors / write_operations * 100 > self.load_params.error_threshold: + if write_operations and writers and write_errors / write_operations * 100 > self.load_params.error_threshold: exceptions.append( f"Write error rate is greater than threshold: {write_errors / write_operations * 100} > {self.load_params.error_threshold}" ) - if readers and read_errors / read_operations * 100 > self.load_params.error_threshold: + if read_operations and readers and read_errors / read_operations * 100 > self.load_params.error_threshold: exceptions.append( f"Read error rate is greater than threshold: {read_errors / read_operations * 100} > {self.load_params.error_threshold}" ) - if deleters and delete_errors / delete_operations * 100 > self.load_params.error_threshold: + if delete_operations and deleters and delete_errors / delete_operations * 100 > self.load_params.error_threshold: exceptions.append( f"Delete error rate is greater than threshold: {delete_errors / delete_operations * 100} > {self.load_params.error_threshold}" ) From 716a780a13102d6b87841407a01a977532ad606c Mon Sep 17 00:00:00 2001 From: anikeev-yadro Date: Thu, 27 Jul 2023 14:29:22 +0300 Subject: [PATCH 033/274] Add epoch align after tick Signed-off-by: anikeev-yadro --- src/frostfs_testlib/steps/node_management.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py index aec9b8a..4b46b62 100644 --- a/src/frostfs_testlib/steps/node_management.py +++ b/src/frostfs_testlib/steps/node_management.py @@ -16,6 +16,7 @@ from frostfs_testlib.resources.cli import ( from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.steps.epoch import tick_epoch +from frostfs_testlib.steps.epoch import wait_for_epochs_align from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate from frostfs_testlib.utils import datetime_utils @@ -189,6 +190,7 @@ def exclude_node_from_network_map( time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) tick_epoch(shell, cluster) + wait_for_epochs_align(shell, cluster) snapshot = get_netmap_snapshot(node=alive_node, shell=shell) assert ( From 807235af95f019265493056b0009207966eee20a Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 31 Jul 2023 14:08:12 +0300 Subject: [PATCH 034/274] Fix multiple services 
start (copy array for upstream functions) Signed-off-by: Andrey Berezin --- .../controllers/cluster_state_controller.py | 27 +++++-------------- 1 file changed, 7 insertions(+), 20 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index c73a8f4..3a2b509 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,13 +1,13 @@ +import copy import time -from concurrent.futures import ThreadPoolExecutor import frostfs_testlib.resources.optionals as optionals from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import CommandOptions, Shell -from frostfs_testlib.steps import epoch from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController -from frostfs_testlib.testing.test_control import run_optionally, wait_for_success +from frostfs_testlib.testing import parallel +from frostfs_testlib.testing.test_control import run_optionally from frostfs_testlib.utils.failover_utils import ( wait_all_storage_nodes_returned, wait_for_host_offline, @@ -139,15 +139,8 @@ class ClusterStateController: # In case if we stopped couple services, for example (s01-s04): # After starting only s01, it may require connections to s02-s04, which is still down, and fail to start. # Also, if something goes wrong here, we might skip s02-s04 start at all, and cluster will be left in a bad state. - # So in order to make sure that services are at least attempted to be started, using threads here. - with ThreadPoolExecutor(max_workers=len(self.stopped_storage_nodes)) as executor: - start_result = executor.map(self.start_storage_service, self.stopped_storage_nodes) - - # Looks tricky, but if exception is raised in any thread, it will be "eaten" by ThreadPoolExecutor, - # But will be thrown here. - # Not ideal solution, but okay for now - for _ in start_result: - pass + # So in order to make sure that services are at least attempted to be started, using parallel runs here. + parallel(self.start_storage_service, copy.copy(self.stopped_storage_nodes)) wait_all_storage_nodes_returned(self.shell, self.cluster) self.stopped_storage_nodes = [] @@ -170,14 +163,8 @@ class ClusterStateController: if not self.stopped_s3_gates: return - with ThreadPoolExecutor(max_workers=len(self.stopped_s3_gates)) as executor: - start_result = executor.map(self.start_s3_gate, self.stopped_s3_gates) - - # Looks tricky, but if exception is raised in any thread, it will be "eaten" by ThreadPoolExecutor, - # But will be thrown here. 
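The comments being removed above describe a real pitfall: `Executor.map` returns a lazy iterator, and an exception raised in a worker only resurfaces once the corresponding result is consumed, which is why the old code had to loop over `start_result`. A small self-contained illustration of that behavior:

    from concurrent.futures import ThreadPoolExecutor

    def boom(x: int) -> int:
        raise RuntimeError(f"failed on {x}")

    with ThreadPoolExecutor(max_workers=2) as executor:
        results = executor.map(boom, [1, 2])  # nothing is raised yet
        for _ in results:  # RuntimeError("failed on 1") surfaces here
            pass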
- # Not ideal solution, but okay for now - for _ in start_result: - pass + parallel(self.start_s3_gate, copy.copy(self.stopped_s3_gates)) + self.stopped_s3_gates = [] @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Suspend {process_name} service in {node}") From e4878f4d1e4b1aaa1df2006674d5437c9ace90c2 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 2 Aug 2023 21:38:27 +0300 Subject: [PATCH 035/274] Add readable enums Signed-off-by: Andrey Berezin --- src/frostfs_testlib/s3/interfaces.py | 5 ++--- src/frostfs_testlib/steps/session_token.py | 5 +++-- src/frostfs_testlib/storage/dataclasses/acl.py | 11 ++++++----- src/frostfs_testlib/testing/readable.py | 9 +++++++++ tests/test_dataclasses.py | 2 ++ 5 files changed, 22 insertions(+), 10 deletions(-) diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index 166abff..8d82f71 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -1,9 +1,8 @@ from abc import abstractmethod from datetime import datetime -from enum import Enum from typing import Literal, Optional, Union -from frostfs_testlib.testing.readable import HumanReadableABC +from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum def _make_objs_dict(key_names): @@ -15,7 +14,7 @@ def _make_objs_dict(key_names): return objs_dict -class VersioningStatus(Enum): +class VersioningStatus(HumanReadableEnum): ENABLED = "Enabled" SUSPENDED = "Suspended" diff --git a/src/frostfs_testlib/steps/session_token.py b/src/frostfs_testlib/steps/session_token.py index 14e25f1..b82d0e2 100644 --- a/src/frostfs_testlib/steps/session_token.py +++ b/src/frostfs_testlib/steps/session_token.py @@ -14,6 +14,7 @@ from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.testing.readable import HumanReadableEnum from frostfs_testlib.utils import json_utils, wallet_utils reporter = get_reporter() @@ -26,7 +27,7 @@ WRONG_VERB = "wrong verb of the session" INVALID_SIGNATURE = "invalid signature of the session data" -class ObjectVerb(Enum): +class ObjectVerb(HumanReadableEnum): PUT = "PUT" DELETE = "DELETE" GET = "GET" @@ -36,7 +37,7 @@ class ObjectVerb(Enum): SEARCH = "SEARCH" -class ContainerVerb(Enum): +class ContainerVerb(HumanReadableEnum): CREATE = "PUT" DELETE = "DELETE" SETEACL = "SETEACL" diff --git a/src/frostfs_testlib/storage/dataclasses/acl.py b/src/frostfs_testlib/storage/dataclasses/acl.py index cceb4d8..1330618 100644 --- a/src/frostfs_testlib/storage/dataclasses/acl.py +++ b/src/frostfs_testlib/storage/dataclasses/acl.py @@ -3,6 +3,7 @@ from dataclasses import dataclass from enum import Enum from typing import Any, Dict, List, Optional, Union +from frostfs_testlib.testing.readable import HumanReadableEnum from frostfs_testlib.utils import wallet_utils logger = logging.getLogger("NeoLogger") @@ -10,7 +11,7 @@ EACL_LIFETIME = 100500 FROSTFS_CONTRACT_CACHE_TIMEOUT = 30 -class EACLOperation(Enum): +class EACLOperation(HumanReadableEnum): PUT = "put" GET = "get" HEAD = "head" @@ -20,24 +21,24 @@ class EACLOperation(Enum): DELETE = "delete" -class EACLAccess(Enum): +class EACLAccess(HumanReadableEnum): ALLOW = "allow" DENY = "deny" -class EACLRole(Enum): +class EACLRole(HumanReadableEnum): OTHERS = "others" USER = "user" SYSTEM = "system" 
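The enum conversions in this hunk swap plain `Enum` bases for `HumanReadableEnum` so that members render as bare names, which is handy in pytest parametrize IDs and report titles. A quick sketch of the difference, using a stand-in enum rather than the library's own class:

    from enum import Enum

    class PlainRole(Enum):
        OTHERS = "others"

    class ReadableRole(Enum):  # same __str__/__repr__ trick as HumanReadableEnum
        OTHERS = "others"

        def __str__(self):
            return self._name_

        def __repr__(self):
            return self._name_

    print(repr(PlainRole.OTHERS))     # <PlainRole.OTHERS: 'others'>
    print(repr(ReadableRole.OTHERS))  # OTHERS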
-class EACLHeaderType(Enum):
+class EACLHeaderType(HumanReadableEnum):
     REQUEST = "req"  # Filter request headers
     OBJECT = "obj"  # Filter object headers
     SERVICE = "SERVICE"  # Filter service headers. These are not processed by FrostFS nodes and exist for service use only


-class EACLMatchType(Enum):
+class EACLMatchType(HumanReadableEnum):
     STRING_EQUAL = "="  # Return true if strings are equal
     STRING_NOT_EQUAL = "!="  # Return true if strings are different

diff --git a/src/frostfs_testlib/testing/readable.py b/src/frostfs_testlib/testing/readable.py
index 66384b7..80f1169 100644
--- a/src/frostfs_testlib/testing/readable.py
+++ b/src/frostfs_testlib/testing/readable.py
@@ -1,4 +1,13 @@
 from abc import ABCMeta
+from enum import Enum
+
+
+class HumanReadableEnum(Enum):
+    def __str__(self):
+        return self._name_
+
+    def __repr__(self):
+        return self._name_


 class HumanReadableABCMeta(ABCMeta):
diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py
index 11cda7a..f1cc51e 100644
--- a/tests/test_dataclasses.py
+++ b/tests/test_dataclasses.py
@@ -3,6 +3,7 @@ from typing import Any
 import pytest

 from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper
+from frostfs_testlib.storage.dataclasses.acl import EACLRole
 from frostfs_testlib.storage.dataclasses.frostfs_services import (
     HTTPGate,
     InnerRing,
@@ -30,6 +31,7 @@ class TestDataclassesStr:
         (S3Gate, "S3Gate"),
         (HTTPGate, "HTTPGate"),
         (InnerRing, "InnerRing"),
+        (EACLRole.OTHERS, "OTHERS"),
     ],
 )
 def test_classes_string_representation(self, obj: Any, expected: str):

From d28f3cdc286603e043de882040b61c33935cde77 Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Fri, 4 Aug 2023 14:19:49 +0300
Subject: [PATCH 036/274] Add UNDEFINED versioning status

Signed-off-by: Andrey Berezin
---
 src/frostfs_testlib/s3/interfaces.py | 1 +
 src/frostfs_testlib/steps/s3/s3_helper.py | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py
index 8d82f71..2b6be7d 100644
--- a/src/frostfs_testlib/s3/interfaces.py
+++ b/src/frostfs_testlib/s3/interfaces.py
@@ -15,6 +15,7 @@ def _make_objs_dict(key_names):


 class VersioningStatus(HumanReadableEnum):
+    UNDEFINED = None
     ENABLED = "Enabled"
     SUSPENDED = "Suspended"

diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py
index ae27124..4b900eb 100644
--- a/src/frostfs_testlib/steps/s3/s3_helper.py
+++ b/src/frostfs_testlib/steps/s3/s3_helper.py
@@ -67,6 +67,9 @@ def try_to_get_objects_and_expect_error(

 @reporter.step_deco("Set versioning status to '{status}' for bucket '{bucket}'")
 def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus):
+    if status == VersioningStatus.UNDEFINED:
+        return
+
     s3_client.get_bucket_versioning_status(bucket)
     s3_client.put_bucket_versioning(bucket, status=status)
     bucket_status = s3_client.get_bucket_versioning_status(bucket)

From 02c079eda3145f49bddc59846fe46624d6628230 Mon Sep 17 00:00:00 2001
From: Ekaterina Chernitsyna
Date: Fri, 4 Aug 2023 15:32:09 +0300
Subject: [PATCH 037/274] [OBJECT-3949] delete mainchain ready

---
 src/frostfs_testlib/steps/payment_neogo.py | 85 ++-----------------
 src/frostfs_testlib/storage/__init__.py | 3 -
 src/frostfs_testlib/storage/constants.py | 1 -
 .../storage/dataclasses/frostfs_services.py | 24 ------
 4 files changed, 9 insertions(+), 104 deletions(-)

diff --git a/src/frostfs_testlib/steps/payment_neogo.py b/src/frostfs_testlib/steps/payment_neogo.py
index 6a64a5a..7fe0b4d 100644
--- 
a/src/frostfs_testlib/steps/payment_neogo.py +++ b/src/frostfs_testlib/steps/payment_neogo.py @@ -13,7 +13,7 @@ from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.dataclasses.frostfs_services import MainChain, MorphChain +from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils reporter = get_reporter() @@ -21,10 +21,8 @@ logger = logging.getLogger("NeoLogger") EMPTY_PASSWORD = "" TX_PERSIST_TIMEOUT = 15 # seconds -ASSET_POWER_MAINCHAIN = 10**8 ASSET_POWER_SIDECHAIN = 10**12 - def get_nns_contract_hash(morph_chain: MorphChain) -> str: return morph_chain.rpc_client.get_contract_state(1)["hash"] @@ -41,33 +39,7 @@ def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell) stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"] return bytes.decode(base64.b64decode(stack_data[0]["value"])) - -@reporter.step_deco("Withdraw Mainnet Gas") -def withdraw_mainnet_gas(shell: Shell, main_chain: MainChain, wlt: str, amount: int): - address = wallet_utils.get_last_address_from_wallet(wlt, EMPTY_PASSWORD) - scripthash = neo3_utils.address_to_script_hash(address) - - neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE) - out = neogo.contract.invokefunction( - wallet=wlt, - address=address, - rpc_endpoint=main_chain.get_endpoint(), - scripthash=FROSTFS_CONTRACT, - method="withdraw", - arguments=f"{scripthash} int:{amount}", - multisig_hash=f"{scripthash}:Global", - wallet_password="", - ) - - m = re.match(r"^Sent invocation transaction (\w{64})$", out.stdout) - if m is None: - raise Exception("Can not get Tx.") - tx = m.group(1) - if not transaction_accepted(main_chain, tx): - raise AssertionError(f"TX {tx} hasn't been processed") - - -def transaction_accepted(main_chain: MainChain, tx_id: str): +def transaction_accepted(morph_chain: MorphChain, tx_id: str): """ This function returns True in case of accepted TX. Args: @@ -79,8 +51,8 @@ def transaction_accepted(main_chain: MainChain, tx_id: str): try: for _ in range(0, TX_PERSIST_TIMEOUT): time.sleep(1) - neogo = NeoGo(shell=main_chain.host.get_shell(), neo_go_exec_path=NEOGO_EXECUTABLE) - resp = neogo.query.tx(tx_hash=tx_id, rpc_endpoint=main_chain.get_endpoint()) + neogo = NeoGo(shell=morph_chain.host.get_shell(), neo_go_exec_path=NEOGO_EXECUTABLE) + resp = neogo.query.tx(tx_hash=tx_id, rpc_endpoint=morph_chain.get_endpoint()) if resp is not None: logger.info(f"TX is accepted in block: {resp}") return True, resp @@ -110,12 +82,11 @@ def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_ logger.error(f"failed to get wallet balance: {out}") raise out - @reporter.step_deco("Transfer Gas") def transfer_gas( shell: Shell, amount: int, - main_chain: MainChain, + morph_chain: MorphChain, wallet_from_path: Optional[str] = None, wallet_from_password: Optional[str] = None, address_from: Optional[str] = None, @@ -138,11 +109,11 @@ def transfer_gas( address_to: The address of the wallet to transfer assets to. amount: Amount of gas to transfer. 
""" - wallet_from_path = wallet_from_path or main_chain.get_wallet_path() + wallet_from_path = wallet_from_path or morph_chain.get_wallet_path() wallet_from_password = ( wallet_from_password if wallet_from_password is not None - else main_chain.get_wallet_password() + else morph_chain.get_wallet_password() ) address_from = address_from or wallet_utils.get_last_address_from_wallet( wallet_from_path, wallet_from_password @@ -153,7 +124,7 @@ def transfer_gas( neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) out = neogo.nep17.transfer( - rpc_endpoint=main_chain.get_endpoint(), + rpc_endpoint=morph_chain.get_endpoint(), wallet=wallet_from_path, wallet_password=wallet_from_password, amount=amount, @@ -165,49 +136,11 @@ def transfer_gas( txid = out.stdout.strip().split("\n")[-1] if len(txid) != 64: raise Exception("Got no TXID after run the command") - if not transaction_accepted(main_chain, txid): + if not transaction_accepted(morph_chain, txid): raise AssertionError(f"TX {txid} hasn't been processed") time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) -@reporter.step_deco("FrostFS Deposit") -def deposit_gas( - shell: Shell, - main_chain: MainChain, - amount: int, - wallet_from_path: str, - wallet_from_password: str, -): - """ - Transferring GAS from given wallet to FrostFS contract address. - """ - # get FrostFS contract address - deposit_addr = converting_utils.contract_hash_to_address(FROSTFS_CONTRACT) - logger.info(f"FrostFS contract address: {deposit_addr}") - address_from = wallet_utils.get_last_address_from_wallet( - wallet_path=wallet_from_path, wallet_password=wallet_from_password - ) - transfer_gas( - shell=shell, - main_chain=main_chain, - amount=amount, - wallet_from_path=wallet_from_path, - wallet_from_password=wallet_from_password, - address_to=deposit_addr, - address_from=address_from, - ) - - -@reporter.step_deco("Get Mainnet Balance") -def get_mainnet_balance(main_chain: MainChain, address: str): - resp = main_chain.rpc_client.get_nep17_balances(address=address) - logger.info(f"Got getnep17balances response: {resp}") - for balance in resp["balance"]: - if balance["assethash"] == GAS_HASH: - return float(balance["amount"]) / ASSET_POWER_MAINCHAIN - return float(0) - - @reporter.step_deco("Get Sidechain Balance") def get_sidechain_balance(morph_chain: MorphChain, address: str): resp = morph_chain.rpc_client.get_nep17_balances(address=address) diff --git a/src/frostfs_testlib/storage/__init__.py b/src/frostfs_testlib/storage/__init__.py index 531964c..3562d25 100644 --- a/src/frostfs_testlib/storage/__init__.py +++ b/src/frostfs_testlib/storage/__init__.py @@ -2,7 +2,6 @@ from frostfs_testlib.storage.constants import _FrostfsServicesNames from frostfs_testlib.storage.dataclasses.frostfs_services import ( HTTPGate, InnerRing, - MainChain, MorphChain, S3Gate, StorageNode, @@ -17,8 +16,6 @@ __class_registry.register_service(_FrostfsServicesNames.INNER_RING, InnerRing) __class_registry.register_service(_FrostfsServicesNames.MORPH_CHAIN, MorphChain) __class_registry.register_service(_FrostfsServicesNames.S3_GATE, S3Gate) __class_registry.register_service(_FrostfsServicesNames.HTTP_GATE, HTTPGate) -# # TODO: Remove this since we are no longer have main chain -__class_registry.register_service(_FrostfsServicesNames.MAIN_CHAIN, MainChain) def get_service_registry() -> ServiceRegistry: diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 2f9d8a8..6deedfb 100644 --- a/src/frostfs_testlib/storage/constants.py +++ 
b/src/frostfs_testlib/storage/constants.py @@ -21,4 +21,3 @@ class _FrostfsServicesNames: HTTP_GATE = "http-gate" MORPH_CHAIN = "morph-chain" INNER_RING = "ir" - MAIN_CHAIN = "main-chain" diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 944837a..ccb30d5 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -110,30 +110,6 @@ class MorphChain(NodeBase): def label(self) -> str: return f"{self.name}: {self.get_endpoint()}" - -class MainChain(NodeBase): - """ - Class represents main-chain consensus node in a cluster - - Consensus node is not always the same as physical host: - It can be service running in a container or on physical host (or physical node, if you will): - For testing perspective, it's not relevant how it is actually running, - since frostfs network will still treat it as "node" - """ - - rpc_client: RPCClient - - def construct(self): - self.rpc_client = RPCClient(self.get_endpoint()) - - def get_endpoint(self) -> str: - return self._get_attribute(ConfigAttributes.ENDPOINT_INTERNAL) - - @property - def label(self) -> str: - return f"{self.name}: {self.get_endpoint()}" - - class StorageNode(NodeBase): """ Class represents storage node in a storage cluster From b1c21e0e5b6ae3d1d3c0ad86d611a2673df0c0d6 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 15 Aug 2023 16:48:28 +0300 Subject: [PATCH 038/274] Add Iptables helper Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/frostfs_cli/object.py | 42 +++++++++++ src/frostfs_testlib/hosting/config.py | 1 + src/frostfs_testlib/steps/cli/object.py | 62 ++++++++++++++- src/frostfs_testlib/steps/iptables.py | 42 +++++++++++ .../controllers/cluster_state_controller.py | 75 ++++++++++++++++++- .../dataclasses/storage_object_info.py | 18 +++++ src/frostfs_testlib/utils/cli_utils.py | 51 ++++++++++++- 7 files changed, 288 insertions(+), 3 deletions(-) create mode 100644 src/frostfs_testlib/steps/iptables.py diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 8915914..476af68 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -351,3 +351,45 @@ class FrostfsCliObject(CliCommand): "object search", **{param: value for param, value in locals().items() if param not in ["self"]}, ) + + def nodes( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """ + Search object nodes. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + generate_key: Generate new private key. + oid: Object ID. + trace: Generate trace ID and print it. + root: Search for user objects. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + verify_presence_all: Verify the actual presence of the object on all netmap nodes. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). 
+ + Returns: + Command's result. + """ + return self._execute( + "object nodes", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py index dd8b4b9..6679470 100644 --- a/src/frostfs_testlib/hosting/config.py +++ b/src/frostfs_testlib/hosting/config.py @@ -64,6 +64,7 @@ class HostConfig: services: list[ServiceConfig] = field(default_factory=list) clis: list[CLIConfig] = field(default_factory=list) attributes: dict[str, str] = field(default_factory=dict) + interfaces: dict[str, str] = field(default_factory=dict) def __post_init__(self) -> None: self.services = [ServiceConfig(**service) for service in self.services or []] diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 9a63604..9c7c694 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -11,8 +11,9 @@ from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.cluster import Cluster +from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.utils import json_utils +from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output logger = logging.getLogger("NeoLogger") reporter = get_reporter() @@ -731,3 +732,62 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict: latest_block[0].replace(":", ""): int(latest_block[1]), validated_state[0].replace(":", ""): int(validated_state[1]), } + + +@reporter.step_deco("Search object nodes") +def get_object_nodes( + cluster: Cluster, + wallet: str, + cid: str, + oid: str, + shell: Shell, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + wallet_config: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> list[ClusterNode]: + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + + result_object_nodes = cli.object.nodes( + rpc_endpoint=endpoint, + wallet=wallet, + cid=cid, + oid=oid, + bearer=bearer, + ttl=1 if is_direct else None, + xhdr=xhdr, + timeout=timeout, + verify_presence_all=verify_presence_all, + ) + + parsing_output = parse_cmd_table(result_object_nodes.stdout, "|") + list_object_nodes = [ + node + for node in parsing_output + if node["should_contain_object"] == "true" and node["actually_contains_object"] == "true" + ] + + netmap_nodes_list = parse_netmap_output( + cli.netmap.snapshot( + rpc_endpoint=endpoint, + wallet=wallet, + ).stdout + ) + netmap_nodes = [ + netmap_node + for object_node in list_object_nodes + for netmap_node in netmap_nodes_list + if object_node["node_id"] == netmap_node.node_id + ] + + result = [ + cluster_node + for netmap_node in netmap_nodes + for cluster_node in cluster.cluster_nodes + if netmap_node.node == cluster_node.host_ip + ] + + return result diff --git a/src/frostfs_testlib/steps/iptables.py b/src/frostfs_testlib/steps/iptables.py new file mode 100644 index 0000000..db0bb22 --- /dev/null +++ b/src/frostfs_testlib/steps/iptables.py @@ -0,0 +1,42 @@ +from frostfs_testlib.shell import Shell +from frostfs_testlib.storage.cluster import ClusterNode + + +class IpTablesHelper: + @staticmethod + def 
drop_input_traffic_to_port(node: ClusterNode, ports: list[str]) -> None: + shell = node.host.get_shell() + for port in ports: + shell.exec(f"iptables -A INPUT -p tcp --dport {port} -j DROP") + + @staticmethod + def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None: + shell = node.host.get_shell() + for ip in block_ip: + shell.exec(f"iptables -A INPUT -s {ip} -j DROP") + + @staticmethod + def restore_input_traffic_to_port(node: ClusterNode) -> None: + shell = node.host.get_shell() + ports = ( + shell.exec("iptables -L --numeric | grep DROP | awk '{print $7}'") + .stdout.strip() + .split("\n") + ) + if ports[0] == "": + return + for port in ports: + shell.exec(f"iptables -D INPUT -p tcp --dport {port.split(':')[-1]} -j DROP") + + @staticmethod + def restore_input_traffic_to_node(node: ClusterNode) -> None: + shell = node.host.get_shell() + unlock_ip = ( + shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'") + .stdout.strip() + .split("\n") + ) + if unlock_ip[0] == "": + return + for ip in unlock_ip: + shell.exec(f"iptables -D INPUT -s {ip} -j DROP") diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 3a2b509..2d439d9 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,13 +1,16 @@ import copy +import itertools import time import frostfs_testlib.resources.optionals as optionals from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import CommandOptions, Shell +from frostfs_testlib.steps import epoch +from frostfs_testlib.steps.iptables import IpTablesHelper from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.testing import parallel -from frostfs_testlib.testing.test_control import run_optionally +from frostfs_testlib.testing.test_control import run_optionally, wait_for_success from frostfs_testlib.utils.failover_utils import ( wait_all_storage_nodes_returned, wait_for_host_offline, @@ -24,6 +27,7 @@ class ClusterStateController: self.detached_disks: dict[str, DiskController] = {} self.stopped_storage_nodes: list[ClusterNode] = [] self.stopped_s3_gates: list[ClusterNode] = [] + self.dropped_traffic: list[ClusterNode] = [] self.cluster = cluster self.shell = shell self.suspended_services: dict[str, list[ClusterNode]] = {} @@ -191,6 +195,62 @@ class ClusterStateController: [node.host.wait_success_resume_process(process_name) for node in list_nodes] self.suspended_services = {} + @reporter.step_deco("Drop traffic to {node}, with ports - {ports}, nodes - {block_nodes}") + def drop_traffic( + self, + mode: str, + node: ClusterNode, + wakeup_timeout: int, + ports: list[str] = None, + block_nodes: list[ClusterNode] = None, + ) -> None: + allowed_modes = ["ports", "nodes"] + assert mode in allowed_modes + + match mode: + case "ports": + IpTablesHelper.drop_input_traffic_to_port(node, ports) + case "nodes": + list_ip = self._parse_intefaces(block_nodes) + IpTablesHelper.drop_input_traffic_to_node(node, list_ip) + time.sleep(wakeup_timeout) + self.dropped_traffic.append(node) + + @reporter.step_deco("Ping traffic") + def ping_traffic( + self, + node: ClusterNode, + nodes_list: list[ClusterNode], + expect_result: int, + ) -> bool: + shell = node.host.get_shell() + options = CommandOptions(check=False) + ips = 
self._parse_intefaces(nodes_list) + for ip in ips: + code = shell.exec(f"ping {ip} -c 1", options).return_code + if code != expect_result: + return False + return True + + @reporter.step_deco("Start traffic to {node}") + def restore_traffic( + self, + mode: str, + node: ClusterNode, + ) -> None: + allowed_modes = ["ports", "nodes"] + assert mode in allowed_modes + + match mode: + case "ports": + IpTablesHelper.restore_input_traffic_to_port(node=node) + case "nodes": + IpTablesHelper.restore_input_traffic_to_node(node=node) + + @reporter.step_deco("Restore blocked nodes") + def restore_all_traffic(self): + parallel(self._restore_traffic_to_node, self.dropped_traffic) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Hard reboot host {node} via magic SysRq option") def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True): @@ -217,3 +277,16 @@ class ClusterStateController: disk_controller = DiskController(node, device, mountpoint) return disk_controller + + def _restore_traffic_to_node(self, node): + IpTablesHelper.restore_input_traffic_to_port(node) + IpTablesHelper.restore_input_traffic_to_node(node) + + def _parse_intefaces(self, nodes: list[ClusterNode]): + interfaces = [] + for node in nodes: + dict_interfaces = node.host.config.interfaces + for type, ip in dict_interfaces.items(): + if "mgmt" not in type: + interfaces.append(ip) + return interfaces diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index dd46740..7747ea8 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -23,3 +23,21 @@ class StorageObjectInfo(ObjectRef): attributes: Optional[list[dict[str, str]]] = None tombstone: Optional[str] = None locks: Optional[list[LockObjectInfo]] = None + + +@dataclass +class NodeNetmapInfo: + node_id: str + node_status: str + node_data_ip: str + continent: str + country: str + country_code: str + external_address: str + location: str + node: str + price: int + sub_div: str + sub_div_code: int + un_locode: str + role: str diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index d869714..5bd4695 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -5,18 +5,21 @@ """ Helper functions to use with `frostfs-cli`, `neo-go` and other CLIs. """ +import csv import json import logging import subprocess import sys from contextlib import suppress from datetime import datetime +from io import StringIO from textwrap import shorten -from typing import TypedDict, Union +from typing import Dict, List, TypedDict, Union import pexpect from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetmapInfo reporter = get_reporter() logger = logging.getLogger("NeoLogger") @@ -131,3 +134,49 @@ def log_command_execution(cmd: str, output: Union[str, TypedDict]) -> None: command_attachment = f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n" with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): reporter.attach(command_attachment, "Command execution") + + +def parse_netmap_output(output: str) -> list[NodeNetmapInfo]: + """ + The cli command will return something like. 
+ + Epoch: 240 + Node 1: 01234 ONLINE /ip4/10.10.10.10/tcp/8080 + Continent: Europe + Country: Russia + CountryCode: RU + ExternalAddr: /ip4/10.10.11.18/tcp/8080 + Location: Moskva + Node: 10.10.10.12 + Price: 5 + SubDiv: Moskva + SubDivCode: MOW + UN-LOCODE: RU MOW + role: alphabet + + The code will parse each line and return each node as dataclass. + """ + netmap_list = output.split("Node ")[1:] + dataclass_list = [] + for node in netmap_list: + node = node.replace("\t", "").split("\n") + node = *node[0].split(" ")[1:-1], *[row.split(": ")[-1] for row in node[1:-1]] + dataclass_list.append(NodeNetmapInfo(*node)) + + return dataclass_list + + +def parse_cmd_table(output: str, delimiter="|") -> list[dict[str, str]]: + parsing_output = [] + reader = csv.reader(StringIO(output.strip()), delimiter=delimiter) + iter_reader = iter(reader) + header_row = next(iter_reader) + for row in iter_reader: + table = {} + for i in range(len(row)): + header = header_row[i].strip().lower().replace(" ", "_") + value = row[i].strip().lower() + if header: + table[header] = value + parsing_output.append(table) + return parsing_output From 7112bf9c88e89ca61d36be885aa3c99d08cfde38 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 17 Aug 2023 12:54:05 +0300 Subject: [PATCH 039/274] Change NodeNetmapInfo class Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/storage/dataclasses/storage_object_info.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index 7747ea8..ea3c510 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -30,6 +30,7 @@ class NodeNetmapInfo: node_id: str node_status: str node_data_ip: str + cluster_name: str continent: str country: str country_code: str From 70595965068404d3eac3c4a5e9feb58ab06e7e7a Mon Sep 17 00:00:00 2001 From: "m.malygina" Date: Mon, 21 Aug 2023 14:55:27 +0300 Subject: [PATCH 040/274] Support prepare locally flag Signed-off-by: m.malygina --- src/frostfs_testlib/load/load_config.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index ec5d3fd..3a7e0b4 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -178,6 +178,10 @@ class LoadParams: min_iteration_duration: Optional[str] = metadata_field( all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False ) + # Prepare/cut objects locally on client before sending + prepare_locally: Optional[bool] = metadata_field( + [LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False + ) # Specifies K6 setupTimeout time. 
Currently hardcoded in xk6 as 5 seconds for all scenarios # https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False) From aa277fdd6af9c0468ef3ea198b8b494b7bc0e855 Mon Sep 17 00:00:00 2001 From: anikeev-yadro Date: Tue, 29 Aug 2023 16:55:25 +0300 Subject: [PATCH 041/274] Increase default load time Signed-off-by: anikeev-yadro --- src/frostfs_testlib/resources/load_params.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/resources/load_params.py b/src/frostfs_testlib/resources/load_params.py index 6699207..2ced33d 100644 --- a/src/frostfs_testlib/resources/load_params.py +++ b/src/frostfs_testlib/resources/load_params.py @@ -11,7 +11,7 @@ BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 0) BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 0) BACKGROUND_DELETERS_COUNT = os.getenv("BACKGROUND_DELETERS_COUNT", 0) BACKGROUND_VERIFIERS_COUNT = os.getenv("BACKGROUND_VERIFIERS_COUNT", 0) -BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 600) +BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 1200) BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE = os.getenv("BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE", 32) BACKGROUND_LOAD_SETUP_TIMEOUT = os.getenv("BACKGROUND_LOAD_SETUP_TIMEOUT", "5s") From 449c18bb1a4be35dc444dfad2af67e279435c7f0 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 30 Aug 2023 15:28:12 +0300 Subject: [PATCH 042/274] Adding options to work with any service type Signed-off-by: Andrey Berezin --- .../controllers/cluster_state_controller.py | 51 +++++++++++++++++-- .../storage/dataclasses/node_base.py | 14 +++-- 2 files changed, 58 insertions(+), 7 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 2d439d9..0148c0d 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,16 +1,15 @@ import copy -import itertools import time import frostfs_testlib.resources.optionals as optionals from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import CommandOptions, Shell -from frostfs_testlib.steps import epoch from frostfs_testlib.steps.iptables import IpTablesHelper from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController +from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.testing import parallel -from frostfs_testlib.testing.test_control import run_optionally, wait_for_success +from frostfs_testlib.testing.test_control import run_optionally from frostfs_testlib.utils.failover_utils import ( wait_all_storage_nodes_returned, wait_for_host_offline, @@ -28,6 +27,7 @@ class ClusterStateController: self.stopped_storage_nodes: list[ClusterNode] = [] self.stopped_s3_gates: list[ClusterNode] = [] self.dropped_traffic: list[ClusterNode] = [] + self.stopped_services: set[NodeBase] = set() self.cluster = cluster self.shell = shell self.suspended_services: dict[str, list[ClusterNode]] = {} @@ -128,6 +128,51 @@ class ClusterStateController: node.storage_node.stop_service() self.stopped_storage_nodes.append(node) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop all {service_type} 
services") + def stop_services_of_type(self, service_type: type[ServiceClass]): + services = self.cluster.services(service_type) + self.stopped_services.update(services) + parallel([service.stop_service for service in services]) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start all {service_type} services") + def start_services_of_type(self, service_type: type[ServiceClass]): + services = self.cluster.services(service_type) + parallel([service.start_service for service in services]) + + if service_type == StorageNode: + wait_all_storage_nodes_returned(self.shell, self.cluster) + + self.stopped_services = self.stopped_services - set(services) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start all stopped services") + def start_all_stopped_services(self): + parallel([service.start_service for service in self.stopped_services]) + + for service in self.stopped_services: + if isinstance(service, StorageNode): + wait_all_storage_nodes_returned(self.shell, self.cluster) + break + + self.stopped_services.clear() + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop {service_type} service on {node}") + def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]): + service = node.service(service_type) + service.stop_service() + self.stopped_services.add(service) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start {service_type} service on {node}") + def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]): + service = node.service(service_type) + service.start_service() + if service in self.stopped_services: + self.stopped_services.remove(service) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start storage service on {node}") def start_storage_service(self, node: ClusterNode): diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 9748bc2..3b1964c 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -1,4 +1,4 @@ -from abc import ABC, abstractmethod +from abc import abstractmethod from dataclasses import dataclass from typing import Optional, Tuple, TypedDict, TypeVar @@ -6,10 +6,13 @@ import yaml from frostfs_testlib.hosting.config import ServiceConfig from frostfs_testlib.hosting.interfaces import Host +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.testing.readable import HumanReadableABC from frostfs_testlib.utils import wallet_utils +reporter = get_reporter() + @dataclass class NodeBase(HumanReadableABC): @@ -54,17 +57,20 @@ class NodeBase(HumanReadableABC): return self._process_name def start_service(self): - self.host.start_service(self.name) + with reporter.step(f"Start {self.name} service on {self.host.config.address}"): + self.host.start_service(self.name) @abstractmethod def service_healthcheck(self) -> bool: """Service healthcheck.""" def stop_service(self): - self.host.stop_service(self.name) + with reporter.step(f"Stop {self.name} service on {self.host.config.address}"): + self.host.stop_service(self.name) def restart_service(self): - self.host.restart_service(self.name) + with reporter.step(f"Restart {self.name} service on {self.host.config.address}"): + self.host.restart_service(self.name) def get_wallet_password(self) -> str: return 
self._get_attribute(ConfigAttributes.WALLET_PASSWORD) From e14896400f767ecac8fa381ee3fe7f9f34725d68 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 6 Sep 2023 16:51:18 +0300 Subject: [PATCH 043/274] Add post-init for load params Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_config.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 3a7e0b4..9023f87 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -271,6 +271,16 @@ class LoadParams: return env_vars + def __post_init__(self): + default_scenario_map = { + LoadType.gRPC: LoadScenario.gRPC, + LoadType.HTTP: LoadScenario.HTTP, + LoadType.S3: LoadScenario.S3, + } + + if self.scenario is None: + self.scenario = default_scenario_map[self.load_type] + def get_preset_arguments(self): command_args = [ self._get_preset_argument(meta_field) @@ -324,7 +334,7 @@ class LoadParams: # TODO: migrate load_params defaults to testlib if self.object_size is not None: size, unit = calc_unit(self.object_size, 1) - static_params = [f"{load_type_str} ({size:.4g} {unit})"] + static_params = [f"{load_type_str} {size:.4g} {unit}"] else: static_params = [f"{load_type_str}"] @@ -335,7 +345,7 @@ class LoadParams: ] params = ", ".join(static_params + dynamic_params) - return f"load: {params}" + return params def __repr__(self) -> str: return self.__str__() From f2d34dbf2e44a37e5595fb6b27da0770c2367178 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Wed, 6 Sep 2023 20:47:30 +0300 Subject: [PATCH 044/274] add latency report --- src/frostfs_testlib/load/load_metrics.py | 26 +++++++++++++++++++++++- src/frostfs_testlib/load/load_report.py | 23 ++++++++++++++++++++- 2 files changed, 47 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py index 0b4e28e..6b44de0 100644 --- a/src/frostfs_testlib/load/load_metrics.py +++ b/src/frostfs_testlib/load/load_metrics.py @@ -8,12 +8,15 @@ class MetricsBase(ABC): _WRITE_SUCCESS = "" _WRITE_ERRORS = "" _WRITE_THROUGHPUT = "data_sent" + _WRITE_LATENCY = "" _READ_SUCCESS = "" _READ_ERRORS = "" + _READ_LATENCY = "" _READ_THROUGHPUT = "data_received" _DELETE_SUCCESS = "" + _DELETE_LATENCY = "" _DELETE_ERRORS = "" def __init__(self, summary) -> None: @@ -27,6 +30,10 @@ class MetricsBase(ABC): @property def write_success_iterations(self) -> int: return self._get_metric(self._WRITE_SUCCESS) + + @property + def write_latency(self) -> dict: + return self._get_metric(self._WRITE_LATENCY) @property def write_rate(self) -> float: @@ -47,6 +54,10 @@ class MetricsBase(ABC): @property def read_success_iterations(self) -> int: return self._get_metric(self._READ_SUCCESS) + + @property + def read_latency(self) -> dict: + return self._get_metric(self._READ_LATENCY) @property def read_rate(self) -> int: @@ -67,6 +78,10 @@ class MetricsBase(ABC): @property def delete_success_iterations(self) -> int: return self._get_metric(self._DELETE_SUCCESS) + + @property + def delete_latency(self) -> dict: + return self._get_metric(self._DELETE_LATENCY) @property def delete_failed_iterations(self) -> int: @@ -77,7 +92,7 @@ class MetricsBase(ABC): return self._get_metric_rate(self._DELETE_SUCCESS) def _get_metric(self, metric: str) -> int: - metrics_method_map = {"counter": self._get_counter_metric, "gauge": self._get_gauge_metric} + metrics_method_map = {"counter": self._get_counter_metric, 
"gauge": self._get_gauge_metric, "trend" : self._get_trend_metrics} if metric not in self.metrics: return 0 @@ -114,28 +129,37 @@ class MetricsBase(ABC): def _get_gauge_metric(self, metric: str) -> int: return metric["values"]["value"] + + def _get_trend_metrics(self, metric: str) -> int: + return metric["values"] class GrpcMetrics(MetricsBase): _WRITE_SUCCESS = "frostfs_obj_put_total" _WRITE_ERRORS = "frostfs_obj_put_fails" + _WRITE_LATENCY = "frostfs_obj_put_duration" _READ_SUCCESS = "frostfs_obj_get_total" _READ_ERRORS = "frostfs_obj_get_fails" + _READ_LATENCY = "frostfs_obj_get_duration" _DELETE_SUCCESS = "frostfs_obj_delete_total" _DELETE_ERRORS = "frostfs_obj_delete_fails" + _DELETE_LATENCY = "frostfs_obj_delete_duration" class S3Metrics(MetricsBase): _WRITE_SUCCESS = "aws_obj_put_total" _WRITE_ERRORS = "aws_obj_put_fails" + _WRITE_LATENCY = "aws_obj_put_duration" _READ_SUCCESS = "aws_obj_get_total" _READ_ERRORS = "aws_obj_get_fails" + _READ_LATENCY = "aws_obj_get_duration" _DELETE_SUCCESS = "aws_obj_delete_total" _DELETE_ERRORS = "aws_obj_delete_fails" + _DELETE_LATENCY = "aws_obj_delete_duration" class LocalMetrics(MetricsBase): diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index a2cecf6..26ab542 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -2,6 +2,7 @@ from datetime import datetime from typing import Optional import yaml +import os from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario from frostfs_testlib.load.load_metrics import get_metrics_object @@ -109,6 +110,7 @@ class LoadReport: total_rate: float, throughput: float, errors: dict[str, int], + latency: dict[str, dict], ): throughput_html = "" if throughput > 0: @@ -127,6 +129,15 @@ class LoadReport: ): per_node_errors_html += self._row(f"At {node_key}", errors) + latency_html = "" + if latency: + for node_key, param_dict in latency.items(): + latency_values = "" + for param_name, param_val in param_dict.items(): + latency_values += f"{param_name}={param_val:.2f}ms " + + latency_html += self._row(f"Put latency {node_key.split(':')[0]}", latency_values) + object_size, object_size_unit = calc_unit(self.load_params.object_size, 1) duration = self._seconds_to_formatted_duration(self.load_params.load_time) model = self._get_model_string() @@ -135,6 +146,7 @@ class LoadReport: errors_percent = 0 if total_operations: errors_percent = total_errors/total_operations*100.0 + html = f""" @@ -142,7 +154,7 @@ class LoadReport: {self._row("Total operations", total_operations)} {self._row("OP/sec", f"{total_rate:.2f}")} {throughput_html} - + {latency_html} {per_node_errors_html} {self._row("Total", f"{total_errors} ({errors_percent:.2f}%)")} @@ -160,6 +172,7 @@ class LoadReport: write_operations = 0 write_op_sec = 0 write_throughput = 0 + write_latency = {} write_errors = {} requested_write_rate = self.load_params.write_rate requested_write_rate_str = ( @@ -169,12 +182,14 @@ class LoadReport: read_operations = 0 read_op_sec = 0 read_throughput = 0 + read_latency = {} read_errors = {} requested_read_rate = self.load_params.read_rate requested_read_rate_str = f"{requested_read_rate}op/sec" if requested_read_rate else "" delete_operations = 0 delete_op_sec = 0 + delete_latency = {} delete_errors = {} requested_delete_rate = self.load_params.delete_rate requested_delete_rate_str = ( @@ -210,6 +225,7 @@ class LoadReport: if write_operations: write_section_required = True write_op_sec += 
metrics.write_rate + write_latency[node_key] = metrics.write_latency write_throughput += metrics.write_throughput if metrics.write_failed_iterations: write_errors[node_key] = metrics.write_failed_iterations @@ -219,6 +235,7 @@ class LoadReport: read_section_required = True read_op_sec += metrics.read_rate read_throughput += metrics.read_throughput + read_latency[node_key] = metrics.read_latency if metrics.read_failed_iterations: read_errors[node_key] = metrics.read_failed_iterations @@ -226,6 +243,7 @@ class LoadReport: if delete_operations: delete_section_required = True delete_op_sec += metrics.delete_rate + delete_latency[node_key] = metrics.delete_latency if metrics.delete_failed_iterations: delete_errors[node_key] = metrics.delete_failed_iterations @@ -238,6 +256,7 @@ class LoadReport: write_op_sec, write_throughput, write_errors, + write_latency, ) if read_section_required: @@ -249,6 +268,7 @@ class LoadReport: read_op_sec, read_throughput, read_errors, + read_latency, ) if delete_section_required: @@ -260,6 +280,7 @@ class LoadReport: delete_op_sec, 0, delete_errors, + delete_latency, ) return html From 19b8b96898cee72bff2166b5cf5765347e34fd64 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 7 Sep 2023 14:36:46 +0300 Subject: [PATCH 045/274] Use only name in ObjectSize repr and str Signed-off-by: Andrey Berezin --- src/frostfs_testlib/storage/dataclasses/object_size.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/storage/dataclasses/object_size.py b/src/frostfs_testlib/storage/dataclasses/object_size.py index 520bdc3..0429c78 100644 --- a/src/frostfs_testlib/storage/dataclasses/object_size.py +++ b/src/frostfs_testlib/storage/dataclasses/object_size.py @@ -7,7 +7,7 @@ class ObjectSize: value: int def __str__(self) -> str: - return f"{self.name} object size" + return self.name def __repr__(self) -> str: return self.__str__() From ecf8f0841a1ea01494572fedab41cfcf288c200f Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 11 Sep 2023 10:36:54 +0300 Subject: [PATCH 046/274] Change NodeNetmapInfo class Signed-off-by: Dmitriy Zayakin --- .../dataclasses/storage_object_info.py | 29 +++++++++---------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index ea3c510..21a820f 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -27,18 +27,17 @@ class StorageObjectInfo(ObjectRef): @dataclass class NodeNetmapInfo: - node_id: str - node_status: str - node_data_ip: str - cluster_name: str - continent: str - country: str - country_code: str - external_address: str - location: str - node: str - price: int - sub_div: str - sub_div_code: int - un_locode: str - role: str + node_id: str = None + node_status: str = None + node_data_ip: str = None + cluster_name: str = None + continent: str = None + country: str = None + country_code: str = None + external_address: str = None + location: str = None + node: str = None + sub_div: str = None + sub_div_code: int = None + un_locode: str = None + role: str = None From f7ef8cb8814a76e98145255e20be091c5acf3a69 Mon Sep 17 00:00:00 2001 From: anikeev-yadro Date: Fri, 15 Sep 2023 12:30:58 +0300 Subject: [PATCH 047/274] Another increase default load time Signed-off-by: anikeev-yadro --- src/frostfs_testlib/resources/load_params.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/src/frostfs_testlib/resources/load_params.py b/src/frostfs_testlib/resources/load_params.py index 2ced33d..bd99859 100644 --- a/src/frostfs_testlib/resources/load_params.py +++ b/src/frostfs_testlib/resources/load_params.py @@ -11,7 +11,7 @@ BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 0) BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 0) BACKGROUND_DELETERS_COUNT = os.getenv("BACKGROUND_DELETERS_COUNT", 0) BACKGROUND_VERIFIERS_COUNT = os.getenv("BACKGROUND_VERIFIERS_COUNT", 0) -BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 1200) +BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 1800) BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE = os.getenv("BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE", 32) BACKGROUND_LOAD_SETUP_TIMEOUT = os.getenv("BACKGROUND_LOAD_SETUP_TIMEOUT", "5s") From be9b3f585529d47a89762b8c8f8948ce90004408 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 12 Sep 2023 17:40:45 +0300 Subject: [PATCH 048/274] Update argument func init s3 Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/s3/s3_helper.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index 4b900eb..d746337 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -194,6 +194,7 @@ def init_s3_credentials( cluster: Cluster, policy: Optional[dict] = None, s3gates: Optional[list[S3Gate]] = None, + container_placement_policy: Optional[str] = None, ): gate_public_keys = [] bucket = str(uuid.uuid4()) @@ -209,6 +210,7 @@ def init_s3_credentials( wallet_password=wallet.password, container_policy=policy, container_friendly_name=bucket, + container_placement_policy=container_placement_policy, ).stdout aws_access_key_id = str( re.search(r"access_key_id.*:\s.(?P\w*)", issue_secret_output).group( From b039ee99401ccd760fdab7cfae3e87cef86825dd Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 18 Sep 2023 17:48:30 +0300 Subject: [PATCH 049/274] Dev Env should not use sudo by default --- src/frostfs_testlib/hosting/docker_host.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 3addd92..e2bc949 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -61,7 +61,7 @@ class ServiceAttributes(ParsedAttributes): class DockerHost(Host): """Manages services hosted in Docker containers running on a local or remote machine.""" - def get_shell(self, sudo: bool = True) -> Shell: + def get_shell(self, sudo: bool = False) -> Shell: host_attributes = HostAttributes.parse(self._config.attributes) command_inspectors = [] if sudo: From fc1f37347783bf326e5ec9653140a613cccb2383 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 19 Sep 2023 11:59:05 +0300 Subject: [PATCH 050/274] Adding interval between ssh connection attempts Signed-off-by: Andrey Berezin --- src/frostfs_testlib/shell/ssh_shell.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index 5771274..435a494 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -91,8 +91,9 @@ class SSHShell(Shell): # to allow remote command to flush its output buffer DELAY_AFTER_EXIT = 0.2 - SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 3 - 
CONNECTION_TIMEOUT = 90 + SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 4 + SSH_ATTEMPTS_INTERVAL: ClassVar[int] = 10 + CONNECTION_TIMEOUT = 60 def __init__( self, @@ -251,7 +252,9 @@ class SSHShell(Shell): return (full_stdout.decode(errors="ignore"), full_stderr.decode(errors="ignore")) - def _create_connection(self, attempts: int = SSH_CONNECTION_ATTEMPTS) -> SSHClient: + def _create_connection( + self, attempts: int = SSH_CONNECTION_ATTEMPTS, interval: int = SSH_ATTEMPTS_INTERVAL + ) -> SSHClient: for attempt in range(attempts): connection = SSHClient() connection.set_missing_host_key_policy(AutoAddPolicy()) @@ -295,7 +298,10 @@ class SSHShell(Shell): connection.close() can_retry = attempt + 1 < attempts if can_retry: - logger.warn(f"Can't connect to host {self.host}, will retry. Error: {exc}") + logger.warn( + f"Can't connect to host {self.host}, will retry after {interval}s. Error: {exc}" + ) + sleep(interval) continue logger.exception(f"Can't connect to host {self.host}") raise HostIsNotAvailable(self.host) from exc From 602de43bffe5e22d6a20ce720b986acbc54bcf67 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 19 Sep 2023 18:14:32 +0300 Subject: [PATCH 051/274] Use all nodes for s3 creds --- src/frostfs_testlib/load/interfaces.py | 1 + src/frostfs_testlib/load/runners.py | 4 +++- .../storage/controllers/background_load_controller.py | 7 ++++++- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/interfaces.py b/src/frostfs_testlib/load/interfaces.py index 6f29868..98c9d62 100644 --- a/src/frostfs_testlib/load/interfaces.py +++ b/src/frostfs_testlib/load/interfaces.py @@ -22,6 +22,7 @@ class ScenarioRunner(ABC): def prepare( self, load_params: LoadParams, + cluster_nodes: list[ClusterNode], nodes_under_load: list[ClusterNode], k6_dir: str, ): diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index 428cd7d..a7fa787 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -72,6 +72,7 @@ class DefaultRunner(RunnerBase): def prepare( self, load_params: LoadParams, + cluster_nodes: list[ClusterNode], nodes_under_load: list[ClusterNode], k6_dir: str, ): @@ -81,7 +82,7 @@ class DefaultRunner(RunnerBase): with reporter.step("Init s3 client on loaders"): storage_node = nodes_under_load[0].service(StorageNode) s3_public_keys = [ - node.service(S3Gate).get_wallet_public_key() for node in nodes_under_load + node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes ] grpc_peer = storage_node.get_rpc_endpoint() @@ -290,6 +291,7 @@ class LocalRunner(RunnerBase): def prepare( self, load_params: LoadParams, + cluster_nodes: list[ClusterNode], nodes_under_load: list[ClusterNode], k6_dir: str, ): diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index aa17f4e..c309b65 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -26,6 +26,7 @@ class BackgroundLoadController: load_params: LoadParams original_load_params: LoadParams verification_params: LoadParams + cluster_nodes: list[ClusterNode] nodes_under_load: list[ClusterNode] load_counter: int loaders_wallet: WalletInfo @@ -39,12 +40,14 @@ class BackgroundLoadController: k6_dir: str, load_params: LoadParams, loaders_wallet: WalletInfo, + cluster_nodes: list[ClusterNode], nodes_under_load: list[ClusterNode], runner: 
ScenarioRunner, ) -> None: self.k6_dir = k6_dir self.original_load_params = load_params self.load_params = copy.deepcopy(self.original_load_params) + self.cluster_nodes = cluster_nodes self.nodes_under_load = nodes_under_load self.load_counter = 1 self.loaders_wallet = loaders_wallet @@ -99,7 +102,9 @@ class BackgroundLoadController: self.endpoints = self._get_endpoints( self.load_params.load_type, self.load_params.endpoint_selection_strategy ) - self.runner.prepare(self.load_params, self.nodes_under_load, self.k6_dir) + self.runner.prepare( + self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir + ) self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) From eb37573df898dacb539a4d9227902925ae91711b Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 25 Sep 2023 16:26:45 +0300 Subject: [PATCH 052/274] [#88] Add read from switch Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/__init__.py | 1 + src/frostfs_testlib/load/load_config.py | 15 +++++++++++- tests/test_load_config.py | 32 ++++++++++++++++++------- 3 files changed, 39 insertions(+), 9 deletions(-) diff --git a/src/frostfs_testlib/load/__init__.py b/src/frostfs_testlib/load/__init__.py index e8ed75e..74b710f 100644 --- a/src/frostfs_testlib/load/__init__.py +++ b/src/frostfs_testlib/load/__init__.py @@ -7,6 +7,7 @@ from frostfs_testlib.load.load_config import ( LoadType, NodesSelectionStrategy, Preset, + ReadFrom, ) from frostfs_testlib.load.load_report import LoadReport from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 9023f87..97f5dd6 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -23,6 +23,12 @@ class LoadScenario(Enum): LOCAL = "local" +class ReadFrom(Enum): + REGISTRY = "registry" + PRESET = "preset" + MANUAL = "manual" + + all_load_scenarios = [ LoadScenario.gRPC, LoadScenario.S3, @@ -170,6 +176,8 @@ class LoadParams: load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False) # Object size in KB for load and preset. object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False) + # For read operations, controls from which set get objects to read + read_from: Optional[ReadFrom] = None # Output registry K6 file. Filled automatically. registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False) # Specifies the minimum duration of every single execution (i.e. iteration). 
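# The hunk below wires read_from into set_id: a .bolt registry file path is
# generated only when reads are served from the K6 registry, and the path is
# cleared when reads come from a preset. A short sketch of the intended
# behavior (paths and ids are illustrative), mirroring the unit test added
# further down in this patch:
#
#     load_params = LoadParams(load_type=LoadType.S3, read_from=ReadFrom.REGISTRY)
#     load_params.preset = Preset()
#     load_params.working_dir = "/tmp"
#     load_params.set_id("test_id")
#     assert load_params.registry_file == "/tmp/test_id_registry.bolt"
#     assert load_params.preset.pregen_json == "/tmp/test_id_prepare.json"
#
#     load_params.read_from = ReadFrom.PRESET
#     load_params.set_id("test_id")
#     assert load_params.registry_file is None  # no registry file for preset-based reads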
@@ -256,7 +264,12 @@ class LoadParams: def set_id(self, load_id): self.load_id = load_id - self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt") + + if self.read_from == ReadFrom.REGISTRY: + self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt") + if self.read_from == ReadFrom.PRESET: + self.registry_file = None + if self.preset: self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json") diff --git a/tests/test_load_config.py b/tests/test_load_config.py index a84a188..256a04b 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -9,7 +9,9 @@ from frostfs_testlib.load.load_config import ( LoadScenario, LoadType, Preset, + ReadFrom, ) +from frostfs_testlib.load.runners import DefaultRunner from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode @@ -47,13 +49,13 @@ class TestLoadConfig: def test_load_params_only_load_type_required(self): load_params = LoadParams(load_type=LoadType.S3) - expected = "load: s3" + expected = "s3" assert repr(load_params) == expected assert f"{load_params}" == expected def test_load_params_initially_have_all_values_none(self): load_params = LoadParams(load_type=LoadType.S3) - self._check_all_values_none(load_params, ["load_type"]) + self._check_all_values_none(load_params, ["load_type", "scenario"]) def test_preset_initially_have_all_values_none(self): preset = Preset() @@ -62,14 +64,14 @@ class TestLoadConfig: @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) def test_string_representation_s3_car(self, load_params: LoadParams): load_params.object_size = 524288 - expected = "load: s3_car (512 MiB), write_rate=10, read_rate=9, delete_rate=11, preallocated_writers=20, preallocated_readers=20, preallocated_deleters=21" + expected = "s3_car 512 MiB, write_rate=10, read_rate=9, delete_rate=11, preallocated_writers=20, preallocated_readers=20, preallocated_deleters=21" assert f"{load_params}" == expected assert repr(load_params) == expected @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True) def test_string_representation_grpc(self, load_params: LoadParams): load_params.object_size = 512 - expected = "load: grpc (512 KiB), writers=7, readers=7, deleters=8" + expected = "grpc 512 KiB, writers=7, readers=7, deleters=8" assert f"{load_params}" == expected assert repr(load_params) == expected @@ -78,15 +80,16 @@ class TestLoadConfig: load_params.endpoint_selection_strategy = EndpointSelectionStrategy.ALL load_params.object_size = 512 background_load_controller = BackgroundLoadController( - "tmp", load_params, "wallet", None, None + "tmp", load_params, "wallet", None, None, DefaultRunner(None) ) - expected = "load: grpc (512 KiB), writers=7, readers=7, deleters=8" + expected = "grpc 512 KiB, writers=7, readers=7, deleters=8" assert f"{background_load_controller}" == expected assert repr(background_load_controller) == expected def test_load_set_id_changes_fields(self): load_params = LoadParams(load_type=LoadType.S3) load_params.preset = Preset() + load_params.read_from = ReadFrom["REGISTRY"] load_params.working_dir = "/tmp" load_params.set_id("test_id") @@ -96,9 +99,18 @@ class TestLoadConfig: # No other values should be changed self._check_all_values_none( - load_params, ["load_type", "working_dir", "load_id", "registry_file", "preset"] + load_params, + [ 
+ "load_type", + "working_dir", + "load_id", + "registry_file", + "preset", + "scenario", + "read_from", + ], ) - self._check_all_values_none(load_params.preset, ["pregen_json"]) + self._check_all_values_none(load_params.preset, ["pregen_json", "scenario"]) @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True) def test_argument_parsing_for_grpc_scenario(self, load_params: LoadParams): @@ -120,6 +132,7 @@ class TestLoadConfig: "READERS": 7, "DELETERS": 8, "PREGEN_JSON": "pregen_json", + "PREPARE_LOCALLY": True, } self._check_preset_params(load_params, expected_preset_args) @@ -152,6 +165,7 @@ class TestLoadConfig: "WRITE_RATE": 10, "READ_RATE": 9, "DELETE_RATE": 11, + "PREPARE_LOCALLY": True, } self._check_preset_params(load_params, expected_preset_args) @@ -319,6 +333,7 @@ class TestLoadConfig: "READERS": 0, "DELETERS": 0, "PREGEN_JSON": "", + "PREPARE_LOCALLY": False, } self._check_preset_params(load_params, expected_preset_args) @@ -353,6 +368,7 @@ class TestLoadConfig: "WRITE_RATE": 0, "READ_RATE": 0, "DELETE_RATE": 0, + "PREPARE_LOCALLY": False, } self._check_preset_params(load_params, expected_preset_args) From 64f004d5a5f0298eaa0bc653f0e98172ffa57c87 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 27 Sep 2023 10:38:28 +0300 Subject: [PATCH 053/274] Add read-from to verify settings --- .../storage/controllers/background_load_controller.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index c309b65..38cdf0f 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -9,7 +9,6 @@ from frostfs_testlib.load.load_config import ( LoadScenario, LoadType, ) -from frostfs_testlib.load.load_metrics import get_metrics_object from frostfs_testlib.load.load_report import LoadReport from frostfs_testlib.load.load_verifiers import LoadVerifier from frostfs_testlib.reporter import get_reporter @@ -181,6 +180,7 @@ class BackgroundLoadController: self.verification_params = LoadParams( verify_clients=self.load_params.verify_clients, scenario=LoadScenario.VERIFY, + read_from=self.load_params.read_from, registry_file=self.load_params.registry_file, verify_time=self.load_params.verify_time, load_type=self.load_params.load_type, From 9feb8135e381b601fa9fca2104ddd09dad1daa0f Mon Sep 17 00:00:00 2001 From: "m.malygina" Date: Wed, 27 Sep 2023 16:32:29 +0300 Subject: [PATCH 054/274] local scenario Signed-off-by: m.malygina --- src/frostfs_testlib/load/load_metrics.py | 1 + src/frostfs_testlib/storage/constants.py | 1 + src/frostfs_testlib/storage/dataclasses/frostfs_services.py | 3 +++ 3 files changed, 5 insertions(+) diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py index 6b44de0..5cec8ea 100644 --- a/src/frostfs_testlib/load/load_metrics.py +++ b/src/frostfs_testlib/load/load_metrics.py @@ -165,6 +165,7 @@ class S3Metrics(MetricsBase): class LocalMetrics(MetricsBase): _WRITE_SUCCESS = "local_obj_put_total" _WRITE_ERRORS = "local_obj_put_fails" + _WRITE_LATENCY = "local_obj_put_duration" _READ_SUCCESS = "local_obj_get_total" _READ_ERRORS = "local_obj_get_fails" diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 6deedfb..dbaac5a 100644 --- a/src/frostfs_testlib/storage/constants.py +++ 
b/src/frostfs_testlib/storage/constants.py @@ -4,6 +4,7 @@ class ConfigAttributes: WALLET_PATH = "wallet_path" WALLET_CONFIG = "wallet_config" CONFIG_PATH = "config_path" + SHARD_CONFIG_PATH = "shard_config_path" LOCAL_WALLET_PATH = "local_wallet_path" LOCAL_WALLET_CONFIG = "local_config_path" ENDPOINT_DATA_0 = "endpoint_data0" diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index ccb30d5..ac2885b 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -138,6 +138,9 @@ class StorageNode(NodeBase): ) return health_metric in output + def get_shard_config_path(self) -> str: + return self._get_attribute(ConfigAttributes.SHARD_CONFIG_PATH) + def get_control_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT) From 98ccd4c38259ef81ef4c5eeacc22bba0c4d761f4 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 3 Oct 2023 15:18:29 +0300 Subject: [PATCH 055/274] [#91] Failover enhancements Signed-off-by: Andrey Berezin --- pyproject.toml | 3 + src/frostfs_testlib/cli/frostfs_cli/cli.py | 2 + src/frostfs_testlib/cli/frostfs_cli/tree.py | 29 +++++++++ .../healthcheck/basic_healthcheck.py | 14 ++++ src/frostfs_testlib/healthcheck/interfaces.py | 9 +++ src/frostfs_testlib/hosting/config.py | 2 + src/frostfs_testlib/load/interfaces.py | 2 +- src/frostfs_testlib/load/k6.py | 39 +++++++++-- src/frostfs_testlib/load/load_config.py | 23 +++++++ src/frostfs_testlib/load/load_metrics.py | 16 +++-- src/frostfs_testlib/load/load_report.py | 21 +++--- src/frostfs_testlib/load/load_verifiers.py | 64 ++++++++++++------- src/frostfs_testlib/load/runners.py | 4 +- src/frostfs_testlib/resources/load_params.py | 3 +- .../controllers/background_load_controller.py | 23 ++++--- src/frostfs_testlib/testing/parallel.py | 2 +- 16 files changed, 200 insertions(+), 56 deletions(-) create mode 100644 src/frostfs_testlib/cli/frostfs_cli/tree.py create mode 100644 src/frostfs_testlib/healthcheck/basic_healthcheck.py create mode 100644 src/frostfs_testlib/healthcheck/interfaces.py diff --git a/pyproject.toml b/pyproject.toml index f85b883..778e2fc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,6 +44,9 @@ allure = "frostfs_testlib.reporter.allure_handler:AllureHandler" [project.entry-points."frostfs.testlib.hosting"] docker = "frostfs_testlib.hosting.docker_host:DockerHost" +[project.entry-points."frostfs.testlib.healthcheck"] +basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck" + [tool.isort] profile = "black" src_paths = ["src", "tests"] diff --git a/src/frostfs_testlib/cli/frostfs_cli/cli.py b/src/frostfs_testlib/cli/frostfs_cli/cli.py index 5d55f55..a78da8b 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/cli.py +++ b/src/frostfs_testlib/cli/frostfs_cli/cli.py @@ -8,6 +8,7 @@ from frostfs_testlib.cli.frostfs_cli.object import FrostfsCliObject from frostfs_testlib.cli.frostfs_cli.session import FrostfsCliSession from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards from frostfs_testlib.cli.frostfs_cli.storagegroup import FrostfsCliStorageGroup +from frostfs_testlib.cli.frostfs_cli.tree import FrostfsCliTree from frostfs_testlib.cli.frostfs_cli.util import FrostfsCliUtil from frostfs_testlib.cli.frostfs_cli.version import FrostfsCliVersion from frostfs_testlib.shell import Shell @@ -36,3 +37,4 @@ class FrostfsCli: self.storagegroup = FrostfsCliStorageGroup(shell, 
frostfs_cli_exec_path, config=config_file)
         self.util = FrostfsCliUtil(shell, frostfs_cli_exec_path, config=config_file)
         self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file)
+        self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file)
diff --git a/src/frostfs_testlib/cli/frostfs_cli/tree.py b/src/frostfs_testlib/cli/frostfs_cli/tree.py
new file mode 100644
index 0000000..af330fe
--- /dev/null
+++ b/src/frostfs_testlib/cli/frostfs_cli/tree.py
@@ -0,0 +1,29 @@
+from typing import Optional
+
+from frostfs_testlib.cli.cli_command import CliCommand
+from frostfs_testlib.shell import CommandResult
+
+
+class FrostfsCliTree(CliCommand):
+    def healthcheck(
+        self,
+        wallet: Optional[str] = None,
+        rpc_endpoint: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """Check tree service health.
+
+        Args:
+            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
+            wallet: WIF (NEP-2) string or path to the wallet or binary key.
+            timeout: Timeout for the operation.
+
+        Returns:
+            Command's result.
+
+        """
+        return self._execute(
+            "tree healthcheck",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py
new file mode 100644
index 0000000..9ec8694
--- /dev/null
+++ b/src/frostfs_testlib/healthcheck/basic_healthcheck.py
@@ -0,0 +1,14 @@
+from frostfs_testlib.healthcheck.interfaces import Healthcheck
+from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.steps.node_management import storage_node_healthcheck
+from frostfs_testlib.storage.cluster import ClusterNode
+
+reporter = get_reporter()
+
+
+class BasicHealthcheck(Healthcheck):
+    @reporter.step_deco("Perform healthcheck for {cluster_node}")
+    def perform_healthcheck(self, cluster_node: ClusterNode):
+        health_check = storage_node_healthcheck(cluster_node.storage_node)
+        if health_check.health_status != "READY" or health_check.network_status != "ONLINE":
+            raise AssertionError(f"Node {cluster_node} is not healthy")
diff --git a/src/frostfs_testlib/healthcheck/interfaces.py b/src/frostfs_testlib/healthcheck/interfaces.py
new file mode 100644
index 0000000..0c77957
--- /dev/null
+++ b/src/frostfs_testlib/healthcheck/interfaces.py
@@ -0,0 +1,9 @@
+from abc import ABC, abstractmethod
+
+from frostfs_testlib.storage.cluster import ClusterNode
+
+
+class Healthcheck(ABC):
+    @abstractmethod
+    def perform(self, cluster_node: ClusterNode):
+        """Perform healthcheck on the target cluster node"""
diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py
index 6679470..88fe3e7 100644
--- a/src/frostfs_testlib/hosting/config.py
+++ b/src/frostfs_testlib/hosting/config.py
@@ -52,6 +52,7 @@ class HostConfig:
     Attributes:
         plugin_name: Name of plugin that should be used to manage the host.
+        healthcheck_plugin_name: Name of the plugin for healthcheck operations.
         address: Address of the machine (IP or DNS name).
         services: List of services hosted on the machine.
         clis: List of CLI tools available on the machine.
@@ -60,6 +61,7 @@ class HostConfig: """ plugin_name: str + healthcheck_plugin_name: str address: str services: list[ServiceConfig] = field(default_factory=list) clis: list[CLIConfig] = field(default_factory=list) diff --git a/src/frostfs_testlib/load/interfaces.py b/src/frostfs_testlib/load/interfaces.py index 98c9d62..394fff7 100644 --- a/src/frostfs_testlib/load/interfaces.py +++ b/src/frostfs_testlib/load/interfaces.py @@ -50,7 +50,7 @@ class ScenarioRunner(ABC): """Returns True if load is running at the moment""" @abstractmethod - def wait_until_finish(self): + def wait_until_finish(self, soft_timeout: int = 0): """Wait until load is finished""" @abstractmethod diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index cb3576e..e7a2b39 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -3,6 +3,7 @@ import logging import math import os from dataclasses import dataclass +from datetime import datetime from time import sleep from typing import Any from urllib.parse import urlparse @@ -39,6 +40,7 @@ class LoadResults: class K6: _k6_process: RemoteProcess + _start_time: datetime def __init__( self, @@ -122,6 +124,7 @@ class K6: with reporter.step( f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}" ): + self._start_time = int(datetime.utcnow().timestamp()) command = ( f"{self._k6_dir}/k6 run {self._generate_env_variables()} " f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js" @@ -131,7 +134,7 @@ class K6: command, self.shell, self.load_params.working_dir, user ) - def wait_until_finished(self) -> None: + def wait_until_finished(self, soft_timeout: int = 0) -> None: with reporter.step( f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}" ): @@ -140,9 +143,36 @@ class K6: else: timeout = self.load_params.load_time or 0 - timeout += int(K6_TEARDOWN_PERIOD) + current_time = int(datetime.utcnow().timestamp()) + working_time = current_time - self._start_time + remaining_time = timeout - working_time + + setup_teardown_time = ( + int(K6_TEARDOWN_PERIOD) + + self.load_params.get_init_time() + + int(self.load_params.setup_timeout.replace("s", "").strip()) + ) + remaining_time_including_setup_and_teardown = remaining_time + setup_teardown_time + timeout = remaining_time_including_setup_and_teardown + + if soft_timeout: + timeout = min(timeout, soft_timeout) + original_timeout = timeout + timeouts = { + "K6 start time": self._start_time, + "Current time": current_time, + "K6 working time": working_time, + "Remaining time for load": remaining_time, + "Setup and teardown": setup_teardown_time, + "Remaining time including setup/teardown": remaining_time_including_setup_and_teardown, + "Soft timeout": soft_timeout, + "Selected timeout": original_timeout, + } + + reporter.attach("\n".join([f"{k}: {v}" for k, v in timeouts.items()]), "timeouts.txt") + min_wait_interval = 10 wait_interval = min_wait_interval if self._k6_process is None: @@ -162,7 +192,8 @@ class K6: return self.stop() - raise TimeoutError(f"Expected K6 to finish after {original_timeout} sec.") + if not soft_timeout: + raise TimeoutError(f"Expected K6 to finish after {original_timeout} sec.") def get_results(self) -> Any: with reporter.step( @@ -187,7 +218,7 @@ class K6: def stop(self) -> None: with reporter.step(f"Stop load from loader {self.loader.ip} on endpoints {self.endpoints}"): - if self.is_running: + if self.is_running(): self._k6_process.stop() self._wait_until_process_end() diff --git 
a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py
index 97f5dd6..678fc38 100644
--- a/src/frostfs_testlib/load/load_config.py
+++ b/src/frostfs_testlib/load/load_config.py
@@ -1,3 +1,4 @@
+import math
 import os
 from dataclasses import dataclass, field, fields, is_dataclass
 from enum import Enum
@@ -133,6 +134,12 @@ class Preset:
     # S3 region (AKA placement policy for S3 buckets)
     s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None, False)
 
+    # Delay between container creation and object upload for preset
+    object_upload_delay: Optional[int] = metadata_field(all_load_scenarios, "sleep", None, False)
+
+    # Flag to control preset errors
+    ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False)
+
 
 @dataclass
 class LoadParams:
@@ -194,6 +201,12 @@ class LoadParams:
     # https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout
     setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False)
 
+    # Delay for read operations when reading from the registry
+    read_age: Optional[int] = metadata_field(all_load_scenarios, None, "READ_AGE", None, False)
+
+    # Initialization time for each VU for k6 load
+    vu_init_time: Optional[float] = None
+
     # ------- CONSTANT VUS SCENARIO PARAMS -------
     # Amount of Writers VU.
     writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True, True)
@@ -306,6 +319,16 @@ class LoadParams:
 
         return command_args
 
+    def get_init_time(self) -> int:
+        return math.ceil(self._get_total_vus() * self.vu_init_time)
+
+    def _get_total_vus(self) -> int:
+        vu_fields = ["writers", "preallocated_writers"]
+        data_fields = [
+            getattr(self, field.name) or 0 for field in fields(self) if field.name in vu_fields
+        ]
+        return sum(data_fields)
+
     def _get_applicable_fields(self):
         applicable_fields = [
             meta_field
diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py
index 5cec8ea..6c201ec 100644
--- a/src/frostfs_testlib/load/load_metrics.py
+++ b/src/frostfs_testlib/load/load_metrics.py
@@ -30,7 +30,7 @@ class MetricsBase(ABC):
     @property
     def write_success_iterations(self) -> int:
         return self._get_metric(self._WRITE_SUCCESS)
-    
+
     @property
     def write_latency(self) -> dict:
         return self._get_metric(self._WRITE_LATENCY)
@@ -54,7 +54,7 @@ class MetricsBase(ABC):
     @property
     def read_success_iterations(self) -> int:
         return self._get_metric(self._READ_SUCCESS)
-    
+
     @property
     def read_latency(self) -> dict:
         return self._get_metric(self._READ_LATENCY)
@@ -78,7 +78,7 @@ class MetricsBase(ABC):
     @property
     def delete_success_iterations(self) -> int:
         return self._get_metric(self._DELETE_SUCCESS)
-    
+
     @property
     def delete_latency(self) -> dict:
         return self._get_metric(self._DELETE_LATENCY)
@@ -92,7 +92,11 @@ class MetricsBase(ABC):
         return self._get_metric_rate(self._DELETE_SUCCESS)
 
     def _get_metric(self, metric: str) -> int:
-        metrics_method_map = {"counter": self._get_counter_metric, "gauge": self._get_gauge_metric, "trend" : self._get_trend_metrics}
+        metrics_method_map = {
+            "counter": self._get_counter_metric,
+            "gauge": self._get_gauge_metric,
+            "trend": self._get_trend_metrics,
+        }
 
         if metric not in self.metrics:
             return 0
@@ -129,8 +133,8 @@ class MetricsBase(ABC):
 
     def _get_gauge_metric(self, metric: str) -> int:
         return metric["values"]["value"]
-
-    def _get_trend_metrics(self, metric: str) -> int: 
+
+    def _get_trend_metrics(self, metric: str) -> int:
         return metric["values"]
 
 
diff --git 
a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index 26ab542..ec6d539 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -2,7 +2,6 @@ from datetime import datetime from typing import Optional import yaml -import os from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario from frostfs_testlib.load.load_metrics import get_metrics_object @@ -110,7 +109,7 @@ class LoadReport: total_rate: float, throughput: float, errors: dict[str, int], - latency: dict[str, dict], + latency: dict[str, dict], ): throughput_html = "" if throughput > 0: @@ -131,12 +130,16 @@ class LoadReport: latency_html = "" if latency: - for node_key, param_dict in latency.items(): - latency_values = "" - for param_name, param_val in param_dict.items(): - latency_values += f"{param_name}={param_val:.2f}ms " + for node_key, latency_dict in latency.items(): + latency_values = "N/A" + if latency_dict: + latency_values = "" + for param_name, param_val in latency_dict.items(): + latency_values += f"{param_name}={param_val:.2f}ms " - latency_html += self._row(f"Put latency {node_key.split(':')[0]}", latency_values) + latency_html += self._row( + f"{operation_type} latency {node_key.split(':')[0]}", latency_values + ) object_size, object_size_unit = calc_unit(self.load_params.object_size, 1) duration = self._seconds_to_formatted_duration(self.load_params.load_time) @@ -145,8 +148,8 @@ class LoadReport: short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {vus_str} {model} - {throughput:.2f}{unit}/s {total_rate:.2f}/s" errors_percent = 0 if total_operations: - errors_percent = total_errors/total_operations*100.0 - + errors_percent = total_errors / total_operations * 100.0 + html = f"""
 <table border="1" cellpadding="5px"><tbody>
 <tr><th colspan="2" bgcolor="gainsboro">{short_summary}</th></tr>
 {self._row("Total operations", total_operations)}
 {self._row("OP/sec", f"{total_rate:.2f}")}
 {throughput_html}
 {latency_html}
 <tr><th colspan="2" bgcolor="gainsboro">Errors</th></tr>
 {self._row("Total", f"{total_errors} ({errors_percent:.2f}%)")}
 </tbody></table><br><hr>
 """
diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py index 80c3962..b691b02 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -12,7 +12,7 @@ class LoadVerifier: def __init__(self, load_params: LoadParams) -> None: self.load_params = load_params - def verify_load_results(self, load_summaries: dict[str, dict]): + def collect_load_issues(self, load_summaries: dict[str, dict]) -> list[str]: write_operations = 0 write_errors = 0 @@ -41,38 +41,58 @@ class LoadVerifier: delete_operations += metrics.delete_total_iterations delete_errors += metrics.delete_failed_iterations - exceptions = [] + issues = [] if writers and not write_operations: - exceptions.append(f"No any write operation was performed") + issues.append(f"No any write operation was performed") if readers and not read_operations: - exceptions.append(f"No any read operation was performed") + issues.append(f"No any read operation was performed") if deleters and not delete_operations: - exceptions.append(f"No any delete operation was performed") + issues.append(f"No any delete operation was performed") - if write_operations and writers and write_errors / write_operations * 100 > self.load_params.error_threshold: - exceptions.append( + if ( + write_operations + and writers + and write_errors / write_operations * 100 > self.load_params.error_threshold + ): + issues.append( f"Write error rate is greater than threshold: {write_errors / write_operations * 100} > {self.load_params.error_threshold}" ) - if read_operations and readers and read_errors / read_operations * 100 > self.load_params.error_threshold: - exceptions.append( + if ( + read_operations + and readers + and read_errors / read_operations * 100 > self.load_params.error_threshold + ): + issues.append( f"Read error rate is greater than threshold: {read_errors / read_operations * 100} > {self.load_params.error_threshold}" ) - if delete_operations and deleters and delete_errors / delete_operations * 100 > self.load_params.error_threshold: - exceptions.append( + if ( + delete_operations + and deleters + and delete_errors / delete_operations * 100 > self.load_params.error_threshold + ): + issues.append( f"Delete error rate is greater than threshold: {delete_errors / delete_operations * 100} > {self.load_params.error_threshold}" ) - assert not exceptions, "\n".join(exceptions) + return issues - def check_verify_results(self, load_summaries, verification_summaries) -> None: - for node_or_endpoint in load_summaries: - with reporter.step(f"Check verify scenario results for {node_or_endpoint}"): - self._check_verify_result( - load_summaries[node_or_endpoint], verification_summaries[node_or_endpoint] + def collect_verify_issues(self, load_summaries, verification_summaries) -> list[str]: + verify_issues: list[str] = [] + for k6_process_label in load_summaries: + with reporter.step(f"Check verify scenario results for {k6_process_label}"): + verify_issues.extend( + self._collect_verify_issues_on_process( + k6_process_label, + load_summaries[k6_process_label], + verification_summaries[k6_process_label], + ) ) + return verify_issues - def _check_verify_result(self, load_summary, verification_summary) -> None: - exceptions = [] + def _collect_verify_issues_on_process( + self, label, load_summary, verification_summary + ) -> list[str]: + issues = [] load_metrics = get_metrics_object(self.load_params.scenario, load_summary) @@ -92,8 +112,8 @@ class LoadVerifier: # Due to interruptions we may see 
total verified objects to be less than written on writers count if abs(total_left_objects - verified_objects) > writers: - exceptions.append( - f"Verified objects mismatch. Total: {total_left_objects}, Verified: {verified_objects}. Writers: {writers}." + issues.append( + f"Verified objects mismatch for {label}. Total: {total_left_objects}, Verified: {verified_objects}. Writers: {writers}." ) - assert not exceptions, "\n".join(exceptions) + return issues diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index a7fa787..489ddcd 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -43,8 +43,8 @@ class RunnerBase(ScenarioRunner): parallel([k6.preset for k6 in self.k6_instances]) @reporter.step_deco("Wait until load finish") - def wait_until_finish(self): - parallel([k6.wait_until_finished for k6 in self.k6_instances]) + def wait_until_finish(self, soft_timeout: int = 0): + parallel([k6.wait_until_finished for k6 in self.k6_instances], soft_timeout=soft_timeout) @property def is_running(self): diff --git a/src/frostfs_testlib/resources/load_params.py b/src/frostfs_testlib/resources/load_params.py index bd99859..97193cc 100644 --- a/src/frostfs_testlib/resources/load_params.py +++ b/src/frostfs_testlib/resources/load_params.py @@ -13,6 +13,7 @@ BACKGROUND_DELETERS_COUNT = os.getenv("BACKGROUND_DELETERS_COUNT", 0) BACKGROUND_VERIFIERS_COUNT = os.getenv("BACKGROUND_VERIFIERS_COUNT", 0) BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 1800) BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE = os.getenv("BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE", 32) +BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME = float(os.getenv("BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME", 0.8)) BACKGROUND_LOAD_SETUP_TIMEOUT = os.getenv("BACKGROUND_LOAD_SETUP_TIMEOUT", "5s") # This will decrease load params for some weak environments @@ -26,7 +27,7 @@ BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY = os.getenv( BACKGROUND_LOAD_S3_LOCATION = os.getenv("BACKGROUND_LOAD_S3_LOCATION", "node-off") PRESET_CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "40") # TODO: At lease one object is required due to bug in xk6 (buckets with no objects produce millions exceptions in read) -PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "10") +PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "1") K6_DIRECTORY = os.getenv("K6_DIRECTORY", "/etc/k6") K6_TEARDOWN_PERIOD = os.getenv("K6_TEARDOWN_PERIOD", "30") K6_STOP_SIGNAL_TIMEOUT = int(os.getenv("K6_STOP_SIGNAL_TIMEOUT", 300)) diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index 38cdf0f..a18a603 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -158,25 +158,27 @@ class BackgroundLoadController: @reporter.step_deco("Run post-load verification") def verify(self): try: - self._verify_load_results() + load_issues = self._collect_load_issues() if self.load_params.verify: - self._run_verify_scenario() + load_issues.extend(self._run_verify_scenario()) + + assert not load_issues, "\n".join(load_issues) finally: self._reset_for_consequent_load() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Verify load results") - def _verify_load_results(self): + @reporter.step_deco("Collect load issues") + def _collect_load_issues(self): verifier = LoadVerifier(self.load_params) - 
verifier.verify_load_results(self.load_summaries)
+        return verifier.collect_load_issues(self.load_summaries)
 
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    def wait_until_finish(self):
-        self.runner.wait_until_finish()
+    def wait_until_finish(self, soft_timeout: int = 0):
+        self.runner.wait_until_finish(soft_timeout)
 
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
     @reporter.step_deco("Verify loaded objects")
-    def _run_verify_scenario(self):
+    def _run_verify_scenario(self) -> list[str]:
         self.verification_params = LoadParams(
             verify_clients=self.load_params.verify_clients,
             scenario=LoadScenario.VERIFY,
@@ -185,6 +187,7 @@
             verify_time=self.load_params.verify_time,
             load_type=self.load_params.load_type,
             load_id=self.load_params.load_id,
+            vu_init_time=0,
             working_dir=self.load_params.working_dir,
             endpoint_selection_strategy=self.load_params.endpoint_selection_strategy,
             k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy,
@@ -199,10 +202,10 @@
         self.runner.start()
         self.runner.wait_until_finish()
 
-        with reporter.step("Check verify results"):
+        with reporter.step("Collect verify issues"):
             verification_summaries = self._get_results()
             verifier = LoadVerifier(self.load_params)
-            verifier.check_verify_results(self.load_summaries, verification_summaries)
+            return verifier.collect_verify_issues(self.load_summaries, verification_summaries)
 
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
     def _get_results(self) -> dict:
diff --git a/src/frostfs_testlib/testing/parallel.py b/src/frostfs_testlib/testing/parallel.py
index 7f4ee26..ebddd38 100644
--- a/src/frostfs_testlib/testing/parallel.py
+++ b/src/frostfs_testlib/testing/parallel.py
@@ -42,7 +42,7 @@ def parallel(
     exceptions = [future.exception() for future in futures if future.exception()]
     if exceptions:
         message = "\n".join([str(e) for e in exceptions])
-        raise RuntimeError(f"The following exceptions occured during parallel run: {message}")
+        raise RuntimeError(f"The following exceptions occurred during parallel run:\n {message}")
 
     return futures
 

From e919064bb96a44ee4fcb3931250ba182c5d1bbc0 Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Thu, 5 Oct 2023 16:42:28 +0300
Subject: [PATCH 056/274] [#92] Fix method name

Signed-off-by: Andrey Berezin
---
 src/frostfs_testlib/healthcheck/basic_healthcheck.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py
index 9ec8694..3f4bc79 100644
--- a/src/frostfs_testlib/healthcheck/basic_healthcheck.py
+++ b/src/frostfs_testlib/healthcheck/basic_healthcheck.py
@@ -8,7 +8,7 @@ reporter = get_reporter()
 
 class BasicHealthcheck(Healthcheck):
     @reporter.step_deco("Perform healthcheck for {cluster_node}")
-    def perform_healthcheck(self, cluster_node: ClusterNode):
+    def perform(self, cluster_node: ClusterNode):
         health_check = storage_node_healthcheck(cluster_node.storage_node)
         if health_check.health_status != "READY" or health_check.network_status != "ONLINE":
             raise AssertionError(f"Node {cluster_node} is not healthy")

From d039bcc221a8170b780a5f1fcc60fac18b498191 Mon Sep 17 00:00:00 2001
From: Dmitriy Zayakin
Date: Mon, 9 Oct 2023 22:30:14 +0300
Subject: [PATCH 057/274] Add IfUpDown utility helper

Signed-off-by: Dmitriy Zayakin
---
 src/frostfs_testlib/steps/iptables.py |  42 ---------
 src/frostfs_testlib/steps/network.py  |  89 +++++++++++++++++++
.../controllers/cluster_state_controller.py | 24 ++++- .../testing/cluster_test_base.py | 22 ++++- 4 files changed, 130 insertions(+), 47 deletions(-) delete mode 100644 src/frostfs_testlib/steps/iptables.py create mode 100644 src/frostfs_testlib/steps/network.py diff --git a/src/frostfs_testlib/steps/iptables.py b/src/frostfs_testlib/steps/iptables.py deleted file mode 100644 index db0bb22..0000000 --- a/src/frostfs_testlib/steps/iptables.py +++ /dev/null @@ -1,42 +0,0 @@ -from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.cluster import ClusterNode - - -class IpTablesHelper: - @staticmethod - def drop_input_traffic_to_port(node: ClusterNode, ports: list[str]) -> None: - shell = node.host.get_shell() - for port in ports: - shell.exec(f"iptables -A INPUT -p tcp --dport {port} -j DROP") - - @staticmethod - def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None: - shell = node.host.get_shell() - for ip in block_ip: - shell.exec(f"iptables -A INPUT -s {ip} -j DROP") - - @staticmethod - def restore_input_traffic_to_port(node: ClusterNode) -> None: - shell = node.host.get_shell() - ports = ( - shell.exec("iptables -L --numeric | grep DROP | awk '{print $7}'") - .stdout.strip() - .split("\n") - ) - if ports[0] == "": - return - for port in ports: - shell.exec(f"iptables -D INPUT -p tcp --dport {port.split(':')[-1]} -j DROP") - - @staticmethod - def restore_input_traffic_to_node(node: ClusterNode) -> None: - shell = node.host.get_shell() - unlock_ip = ( - shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'") - .stdout.strip() - .split("\n") - ) - if unlock_ip[0] == "": - return - for ip in unlock_ip: - shell.exec(f"iptables -D INPUT -s {ip} -j DROP") diff --git a/src/frostfs_testlib/steps/network.py b/src/frostfs_testlib/steps/network.py new file mode 100644 index 0000000..a865461 --- /dev/null +++ b/src/frostfs_testlib/steps/network.py @@ -0,0 +1,89 @@ +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.testing.test_control import retry + +reporter = get_reporter() + + +class IpTablesHelper: + @staticmethod + def drop_input_traffic_to_port(node: ClusterNode, ports: list[str]) -> None: + shell = node.host.get_shell() + for port in ports: + shell.exec(f"iptables -A INPUT -p tcp --dport {port} -j DROP") + + @staticmethod + def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None: + shell = node.host.get_shell() + for ip in block_ip: + shell.exec(f"iptables -A INPUT -s {ip} -j DROP") + + @staticmethod + def restore_input_traffic_to_port(node: ClusterNode) -> None: + shell = node.host.get_shell() + ports = ( + shell.exec("iptables -L --numeric | grep DROP | awk '{print $7}'") + .stdout.strip() + .split("\n") + ) + if ports[0] == "": + return + for port in ports: + shell.exec(f"iptables -D INPUT -p tcp --dport {port.split(':')[-1]} -j DROP") + + @staticmethod + def restore_input_traffic_to_node(node: ClusterNode) -> None: + shell = node.host.get_shell() + unlock_ip = ( + shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'") + .stdout.strip() + .split("\n") + ) + if unlock_ip[0] == "": + return + for ip in unlock_ip: + shell.exec(f"iptables -D INPUT -s {ip} -j DROP") + + +# TODO Move class to HOST +class IfUpDownHelper: + @reporter.step_deco("Down {interface} to {node}") + def down_interface(self, node: ClusterNode, interface: str) -> None: + shell = node.host.get_shell() + shell.exec(f"ifdown {interface}") + + @reporter.step_deco("Up 
{interface} to {node}") + def up_interface(self, node: ClusterNode, interface: str) -> None: + shell = node.host.get_shell() + shell.exec(f"ifup {interface}") + + @reporter.step_deco("Up all interface to {node}") + def up_all_interface(self, node: ClusterNode) -> None: + shell = node.host.get_shell() + interfaces = list(node.host.config.interfaces.keys()) + shell.exec("ifup -av") + for name_interface in interfaces: + self.check_state_up(node, name_interface) + + @reporter.step_deco("Down all interface to {node}") + def down_all_interface(self, node: ClusterNode) -> None: + shell = node.host.get_shell() + interfaces = list(node.host.config.interfaces.keys()) + shell.exec("ifdown -av") + for name_interface in interfaces: + self.check_state_down(node, name_interface) + + @reporter.step_deco("Check {node} to {interface}") + def check_state(self, node: ClusterNode, interface: str) -> str: + shell = node.host.get_shell() + return shell.exec( + f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'" + ).stdout.strip() + + @retry(max_attempts=5, sleep_interval=5, expected_result="UP") + def check_state_up(self, node: ClusterNode, interface: str) -> str: + return self.check_state(node=node, interface=interface) + + @retry(max_attempts=5, sleep_interval=5, expected_result="DOWN") + def check_state_down(self, node: ClusterNode, interface: str) -> str: + return self.check_state(node=node, interface=interface) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 0148c0d..ed82167 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -4,7 +4,7 @@ import time import frostfs_testlib.resources.optionals as optionals from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import CommandOptions, Shell -from frostfs_testlib.steps.iptables import IpTablesHelper +from frostfs_testlib.steps.network import IfUpDownHelper, IpTablesHelper from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass @@ -18,6 +18,7 @@ from frostfs_testlib.utils.failover_utils import ( ) reporter = get_reporter() +if_up_down_helper = IfUpDownHelper() class ClusterStateController: @@ -31,6 +32,7 @@ class ClusterStateController: self.cluster = cluster self.shell = shell self.suspended_services: dict[str, list[ClusterNode]] = {} + self.nodes_with_modified_interface: list[ClusterNode] = [] @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop host of node {node}") @@ -312,6 +314,26 @@ class ClusterStateController: wait_for_host_online(self.shell, node.storage_node) wait_for_node_online(node.storage_node) + @reporter.step_deco("Down {interface} to {nodes}") + def down_interface(self, nodes: list[ClusterNode], interface: str): + for node in nodes: + if_up_down_helper.down_interface(node=node, interface=interface) + assert if_up_down_helper.check_state(node=node, interface=interface) == "DOWN" + self.nodes_with_modified_interface.append(node) + + @reporter.step_deco("Up {interface} to {nodes}") + def up_interface(self, nodes: list[ClusterNode], interface: str): + for node in nodes: + if_up_down_helper.up_interface(node=node, interface=interface) + assert if_up_down_helper.check_state(node=node, 
interface=interface) == "UP" + if node in self.nodes_with_modified_interface: + self.nodes_with_modified_interface.remove(node) + + @reporter.step_deco("Restore interface") + def restore_interfaces(self): + for node in self.nodes_with_modified_interface: + if_up_down_helper.up_all_interface(node) + def _get_disk_controller( self, node: StorageNode, device: str, mountpoint: str ) -> DiskController: diff --git a/src/frostfs_testlib/testing/cluster_test_base.py b/src/frostfs_testlib/testing/cluster_test_base.py index 11f67f0..0676813 100644 --- a/src/frostfs_testlib/testing/cluster_test_base.py +++ b/src/frostfs_testlib/testing/cluster_test_base.py @@ -1,10 +1,13 @@ +import time from typing import Optional from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.steps import epoch from frostfs_testlib.storage.cluster import Cluster from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode +from frostfs_testlib.utils import datetime_utils reporter = get_reporter() @@ -14,13 +17,24 @@ class ClusterTestBase: shell: Shell cluster: Cluster - @reporter.step_deco("Tick {epochs_to_tick} epochs") - def tick_epochs(self, epochs_to_tick: int, alive_node: Optional[StorageNode] = None): + @reporter.step_deco("Tick {epochs_to_tick} epochs, wait {wait_block} block") + def tick_epochs( + self, + epochs_to_tick: int, + alive_node: Optional[StorageNode] = None, + wait_block: int = None, + ): for _ in range(epochs_to_tick): - self.tick_epoch(alive_node) + self.tick_epoch(alive_node, wait_block) - def tick_epoch(self, alive_node: Optional[StorageNode] = None): + def tick_epoch( + self, + alive_node: Optional[StorageNode] = None, + wait_block: int = None, + ): epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node) + if wait_block: + time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * wait_block) def wait_for_epochs_align(self): epoch.wait_for_epochs_align(self.shell, self.cluster) From 2c2af7f8ed0ca9199d0a21d0091f260083fbc243 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 10 Oct 2023 17:47:46 +0300 Subject: [PATCH 058/274] Keep only one ssh connection per host Signed-off-by: Andrey Berezin --- src/frostfs_testlib/shell/__init__.py | 2 +- src/frostfs_testlib/shell/ssh_shell.py | 188 +++++++++------- .../controllers/cluster_state_controller.py | 15 +- tests/test_ssh_shell.py | 200 ++++++++++++------ 4 files changed, 261 insertions(+), 144 deletions(-) diff --git a/src/frostfs_testlib/shell/__init__.py b/src/frostfs_testlib/shell/__init__.py index 0300ff8..980d119 100644 --- a/src/frostfs_testlib/shell/__init__.py +++ b/src/frostfs_testlib/shell/__init__.py @@ -1,3 +1,3 @@ from frostfs_testlib.shell.interfaces import CommandOptions, CommandResult, InteractiveInput, Shell from frostfs_testlib.shell.local_shell import LocalShell -from frostfs_testlib.shell.ssh_shell import SSHShell +from frostfs_testlib.shell.ssh_shell import SshConnectionProvider, SSHShell diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index 435a494..6db7d51 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -20,12 +20,117 @@ from paramiko import ( from paramiko.ssh_exception import AuthenticationException from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell +from frostfs_testlib.shell.interfaces 
import ( + CommandInspector, + CommandOptions, + CommandResult, + Shell, + SshCredentials, +) logger = logging.getLogger("frostfs.testlib.shell") reporter = get_reporter() +class SshConnectionProvider: + SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 4 + SSH_ATTEMPTS_INTERVAL: ClassVar[int] = 10 + CONNECTION_TIMEOUT = 60 + + instance = None + connections: dict[str, SSHClient] = {} + creds: dict[str, SshCredentials] = {} + + def __new__(cls): + if not cls.instance: + cls.instance = super(SshConnectionProvider, cls).__new__(cls) + return cls.instance + + def store_creds(self, host: str, ssh_creds: SshCredentials): + self.creds[host] = ssh_creds + + def provide(self, host: str, port: str) -> SSHClient: + if host not in self.creds: + raise RuntimeError(f"Please add credentials for host {host}") + + if host in self.connections: + client = self.connections[host] + if client: + return client + + creds = self.creds[host] + client = self._create_connection(host, port, creds) + self.connections[host] = client + return client + + def drop(self, host: str): + if host in self.connections: + client = self.connections.pop(host) + client.close() + + def drop_all(self): + hosts = list(self.connections.keys()) + for host in hosts: + self.drop(host) + + def _create_connection( + self, + host: str, + port: str, + creds: SshCredentials, + ) -> SSHClient: + for attempt in range(self.SSH_CONNECTION_ATTEMPTS): + connection = SSHClient() + connection.set_missing_host_key_policy(AutoAddPolicy()) + try: + if creds.ssh_key_path: + logger.info( + f"Trying to connect to host {host} as {creds.ssh_login} using SSH key " + f"{creds.ssh_key_path} (attempt {attempt})" + ) + connection.connect( + hostname=host, + port=port, + username=creds.ssh_login, + pkey=_load_private_key(creds.ssh_key_path, creds.ssh_key_passphrase), + timeout=self.CONNECTION_TIMEOUT, + ) + else: + logger.info( + f"Trying to connect to host {host} as {creds.ssh_login} using password " + f"(attempt {attempt})" + ) + connection.connect( + hostname=host, + port=port, + username=creds.ssh_login, + password=creds.ssh_password, + timeout=self.CONNECTION_TIMEOUT, + ) + return connection + except AuthenticationException: + connection.close() + logger.exception(f"Can't connect to host {host}") + raise + except ( + SSHException, + ssh_exception.NoValidConnectionsError, + AttributeError, + socket.timeout, + OSError, + ) as exc: + connection.close() + can_retry = attempt + 1 < self.SSH_CONNECTION_ATTEMPTS + if can_retry: + logger.warn( + f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. 
Error: {exc}" + ) + sleep(self.SSH_ATTEMPTS_INTERVAL) + continue + logger.exception(f"Can't connect to host {host}") + raise HostIsNotAvailable(host) from exc + + class HostIsNotAvailable(Exception): """Raised when host is not reachable via SSH connection.""" @@ -91,10 +196,6 @@ class SSHShell(Shell): # to allow remote command to flush its output buffer DELAY_AFTER_EXIT = 0.2 - SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 4 - SSH_ATTEMPTS_INTERVAL: ClassVar[int] = 10 - CONNECTION_TIMEOUT = 60 - def __init__( self, host: str, @@ -106,23 +207,21 @@ class SSHShell(Shell): command_inspectors: Optional[list[CommandInspector]] = None, ) -> None: super().__init__() + self.connection_provider = SshConnectionProvider() + self.connection_provider.store_creds( + host, SshCredentials(login, password, private_key_path, private_key_passphrase) + ) self.host = host self.port = port - self.login = login - self.password = password - self.private_key_path = private_key_path - self.private_key_passphrase = private_key_passphrase + self.command_inspectors = command_inspectors or [] - self.__connection: Optional[SSHClient] = None @property def _connection(self): - if not self.__connection: - self.__connection = self._create_connection() - return self.__connection + return self.connection_provider.provide(self.host, self.port) def drop(self): - self._reset_connection() + self.connection_provider.drop(self.host) def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult: options = options or CommandOptions() @@ -196,7 +295,7 @@ class SSHShell(Shell): socket.timeout, ) as exc: logger.exception(f"Can't execute command {command} on host: {self.host}") - self._reset_connection() + self.drop() raise HostIsNotAvailable(self.host) from exc def _read_channels( @@ -251,62 +350,3 @@ class SSHShell(Shell): full_stderr = b"".join(stderr_chunks) return (full_stdout.decode(errors="ignore"), full_stderr.decode(errors="ignore")) - - def _create_connection( - self, attempts: int = SSH_CONNECTION_ATTEMPTS, interval: int = SSH_ATTEMPTS_INTERVAL - ) -> SSHClient: - for attempt in range(attempts): - connection = SSHClient() - connection.set_missing_host_key_policy(AutoAddPolicy()) - try: - if self.private_key_path: - logger.info( - f"Trying to connect to host {self.host} as {self.login} using SSH key " - f"{self.private_key_path} (attempt {attempt})" - ) - connection.connect( - hostname=self.host, - port=self.port, - username=self.login, - pkey=_load_private_key(self.private_key_path, self.private_key_passphrase), - timeout=self.CONNECTION_TIMEOUT, - ) - else: - logger.info( - f"Trying to connect to host {self.host} as {self.login} using password " - f"(attempt {attempt})" - ) - connection.connect( - hostname=self.host, - port=self.port, - username=self.login, - password=self.password, - timeout=self.CONNECTION_TIMEOUT, - ) - return connection - except AuthenticationException: - connection.close() - logger.exception(f"Can't connect to host {self.host}") - raise - except ( - SSHException, - ssh_exception.NoValidConnectionsError, - AttributeError, - socket.timeout, - OSError, - ) as exc: - connection.close() - can_retry = attempt + 1 < attempts - if can_retry: - logger.warn( - f"Can't connect to host {self.host}, will retry after {interval}s. 
Error: {exc}" - ) - sleep(interval) - continue - logger.exception(f"Can't connect to host {self.host}") - raise HostIsNotAvailable(self.host) from exc - - def _reset_connection(self) -> None: - if self.__connection: - self.__connection.close() - self.__connection = None diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index ed82167..c6391f5 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -3,7 +3,7 @@ import time import frostfs_testlib.resources.optionals as optionals from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.shell import CommandOptions, Shell +from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider from frostfs_testlib.steps.network import IfUpDownHelper, IpTablesHelper from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController @@ -37,6 +37,10 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop host of node {node}") def stop_node_host(self, node: ClusterNode, mode: str): + # Drop ssh connection for this node before shutdown + provider = SshConnectionProvider() + provider.drop(node.host_ip) + with reporter.step(f"Stop host {node.host.config.address}"): node.host.stop_host(mode=mode) wait_for_host_offline(self.shell, node.storage_node) @@ -48,6 +52,11 @@ class ClusterStateController: nodes = ( reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes ) + + # Drop all ssh connections before shutdown + provider = SshConnectionProvider() + provider.drop_all() + for node in nodes: with reporter.step(f"Stop host {node.host.config.address}"): self.stopped_nodes.append(node) @@ -307,6 +316,10 @@ class ClusterStateController: options = CommandOptions(close_stdin=True, timeout=1, check=False) shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options) + # Drop ssh connection for this node + provider = SshConnectionProvider() + provider.drop(node.host_ip) + if wait_for_return: # Let the things to be settled # A little wait here to prevent ssh stuck during panic diff --git a/tests/test_ssh_shell.py b/tests/test_ssh_shell.py index 4d1c0fd..ecd8c3c 100644 --- a/tests/test_ssh_shell.py +++ b/tests/test_ssh_shell.py @@ -1,50 +1,68 @@ import os -from unittest import SkipTest, TestCase + +import pytest from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput -from frostfs_testlib.shell.ssh_shell import SSHShell +from frostfs_testlib.shell.ssh_shell import SshConnectionProvider, SSHShell from helpers import format_error_details, get_output_lines -def init_shell() -> SSHShell: - host = os.getenv("SSH_SHELL_HOST") +def get_shell(host: str): port = os.getenv("SSH_SHELL_PORT", "22") login = os.getenv("SSH_SHELL_LOGIN") - private_key_path = os.getenv("SSH_SHELL_PRIVATE_KEY_PATH") - private_key_passphrase = os.getenv("SSH_SHELL_PRIVATE_KEY_PASSPHRASE") + + password = os.getenv("SSH_SHELL_PASSWORD", "") + private_key_path = os.getenv("SSH_SHELL_PRIVATE_KEY_PATH", "") + private_key_passphrase = os.getenv("SSH_SHELL_PRIVATE_KEY_PASSPHRASE", "") if not all([host, login, private_key_path, private_key_passphrase]): # TODO: in the future we might use https://pypi.org/project/mock-ssh-server, # at the moment it is not suitable for us because of its 
issues with stdin - raise SkipTest("SSH connection is not configured") + pytest.skip("SSH connection is not configured") return SSHShell( host=host, port=port, login=login, + password=password, private_key_path=private_key_path, private_key_passphrase=private_key_passphrase, ) -class TestSSHShellInteractive(TestCase): - @classmethod - def setUpClass(cls): - cls.shell = init_shell() +@pytest.fixture(scope="module") +def shell() -> SSHShell: + return get_shell(host=os.getenv("SSH_SHELL_HOST")) - def test_command_with_one_prompt(self): + +@pytest.fixture(scope="module") +def shell_same_host() -> SSHShell: + return get_shell(host=os.getenv("SSH_SHELL_HOST")) + + +@pytest.fixture(scope="module") +def shell_another_host() -> SSHShell: + return get_shell(host=os.getenv("SSH_SHELL_HOST_2")) + + +@pytest.fixture(scope="function", autouse=True) +def reset_connection(): + provider = SshConnectionProvider() + provider.drop_all() + + +class TestSSHShellInteractive: + def test_command_with_one_prompt(self, shell: SSHShell): script = "password = input('Password: '); print('\\n' + password)" inputs = [InteractiveInput(prompt_pattern="Password", input="test")] - result = self.shell.exec( - f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs) - ) + result = shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)) - self.assertEqual(0, result.return_code) - self.assertEqual(["Password: test", "test"], get_output_lines(result)) - self.assertEqual("", result.stderr) + assert result.return_code == 0 + assert ["Password: test", "test"] == get_output_lines(result) + assert not result.stderr - def test_command_with_several_prompts(self): + def test_command_with_several_prompts(self, shell: SSHShell): script = ( "input1 = input('Input1: '); print('\\n' + input1); " "input2 = input('Input2: '); print('\\n' + input2)" @@ -54,86 +72,132 @@ class TestSSHShellInteractive(TestCase): InteractiveInput(prompt_pattern="Input2", input="test2"), ] - result = self.shell.exec( - f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs) - ) + result = shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)) - self.assertEqual(0, result.return_code) - self.assertEqual( - ["Input1: test1", "test1", "Input2: test2", "test2"], get_output_lines(result) - ) - self.assertEqual("", result.stderr) + assert result.return_code == 0 + assert ["Input1: test1", "test1", "Input2: test2", "test2"] == get_output_lines(result) + assert not result.stderr - def test_invalid_command_with_check(self): + def test_invalid_command_with_check(self, shell: SSHShell): script = "invalid script" inputs = [InteractiveInput(prompt_pattern=".*", input="test")] - with self.assertRaises(RuntimeError) as raised: - self.shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)) + with pytest.raises(RuntimeError) as raised: + shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)) - error = format_error_details(raised.exception) - self.assertIn("SyntaxError", error) - self.assertIn("return code: 1", error) + error = format_error_details(raised.value) + assert "SyntaxError" in error + assert "return code: 1" in error - def test_invalid_command_without_check(self): + def test_invalid_command_without_check(self, shell: SSHShell): script = "invalid script" inputs = [InteractiveInput(prompt_pattern=".*", input="test")] - result = self.shell.exec( + result = shell.exec( f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs, check=False), ) - 
self.assertIn("SyntaxError", result.stdout) - self.assertEqual(1, result.return_code) + assert "SyntaxError" in result.stdout + assert result.return_code == 1 - def test_non_existing_binary(self): + def test_non_existing_binary(self, shell: SSHShell): inputs = [InteractiveInput(prompt_pattern=".*", input="test")] - with self.assertRaises(RuntimeError) as raised: - self.shell.exec("not-a-command", CommandOptions(interactive_inputs=inputs)) + with pytest.raises(RuntimeError) as raised: + shell.exec("not-a-command", CommandOptions(interactive_inputs=inputs)) - error = format_error_details(raised.exception) - self.assertIn("return code: 127", error) + error = format_error_details(raised.value) + assert "return code: 127" in error -class TestSSHShellNonInteractive(TestCase): - @classmethod - def setUpClass(cls): - cls.shell = init_shell() - - def test_correct_command(self): +class TestSSHShellNonInteractive: + def test_correct_command(self, shell: SSHShell): script = "print('test')" - result = self.shell.exec(f'python3 -c "{script}"') + result = shell.exec(f'python3 -c "{script}"') - self.assertEqual(0, result.return_code) - self.assertEqual("test", result.stdout.strip()) - self.assertEqual("", result.stderr) + assert result.return_code == 0 + assert result.stdout.strip() == "test" + assert not result.stderr - def test_invalid_command_with_check(self): + def test_invalid_command_with_check(self, shell: SSHShell): script = "invalid script" - with self.assertRaises(RuntimeError) as raised: - self.shell.exec(f'python3 -c "{script}"') + with pytest.raises(RuntimeError) as raised: + shell.exec(f'python3 -c "{script}"') - error = format_error_details(raised.exception) - self.assertIn("Error", error) - self.assertIn("return code: 1", error) + error = format_error_details(raised.value) + assert "Error" in error + assert "return code: 1" in error - def test_invalid_command_without_check(self): + def test_invalid_command_without_check(self, shell: SSHShell): script = "invalid script" - result = self.shell.exec(f'python3 -c "{script}"', CommandOptions(check=False)) + result = shell.exec(f'python3 -c "{script}"', CommandOptions(check=False)) - self.assertEqual(1, result.return_code) + assert result.return_code == 1 # TODO: we have inconsistency with local shell here, the local shell captures error info # in stdout while ssh shell captures it in stderr - self.assertIn("Error", result.stderr) + assert "Error" in result.stderr - def test_non_existing_binary(self): - with self.assertRaises(RuntimeError) as exc: - self.shell.exec("not-a-command") + def test_non_existing_binary(self, shell: SSHShell): + with pytest.raises(RuntimeError) as raised: + shell.exec("not-a-command") - error = format_error_details(exc.exception) - self.assertIn("Error", error) - self.assertIn("return code: 127", error) + error = format_error_details(raised.value) + assert "Error" in error + assert "return code: 127" in error + + +class TestSSHShellConnection: + def test_connection_provider_is_singleton(self): + provider = SshConnectionProvider() + provider2 = SshConnectionProvider() + assert id(provider) == id(provider2) + + def test_connection_provider_has_creds(self, shell: SSHShell): + provider = SshConnectionProvider() + assert len(provider.creds) == 1 + assert len(provider.connections) == 0 + + def test_connection_provider_has_only_one_connection(self, shell: SSHShell): + provider = SshConnectionProvider() + assert len(provider.connections) == 0 + shell.exec("echo 1") + assert len(provider.connections) == 1 + shell.exec("echo 2") + 
assert len(provider.connections) == 1 + shell.drop() + assert len(provider.connections) == 0 + + def test_connection_same_host(self, shell: SSHShell, shell_same_host: SSHShell): + provider = SshConnectionProvider() + assert len(provider.connections) == 0 + + shell.exec("echo 1") + assert len(provider.connections) == 1 + + shell_same_host.exec("echo 2") + assert len(provider.connections) == 1 + + shell.drop() + assert len(provider.connections) == 0 + + shell.exec("echo 3") + assert len(provider.connections) == 1 + + def test_connection_another_host(self, shell: SSHShell, shell_another_host: SSHShell): + provider = SshConnectionProvider() + assert len(provider.connections) == 0 + + shell.exec("echo 1") + assert len(provider.connections) == 1 + + shell_another_host.exec("echo 2") + assert len(provider.connections) == 2 + + shell.drop() + assert len(provider.connections) == 1 + + shell_another_host.drop() + assert len(provider.connections) == 0 From 98f9c78f099d9740a669c66cb65d5a7674e7d041 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 11 Oct 2023 18:21:40 +0300 Subject: [PATCH 059/274] [#97] Probe fix for filedescriptor issue Signed-off-by: Andrey Berezin --- src/frostfs_testlib/shell/local_shell.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index 56d19b2..fa07890 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -39,7 +39,7 @@ class LocalShell(Shell): log_file = tempfile.TemporaryFile() # File is reliable cross-platform way to capture output try: - command_process = pexpect.spawn(command, timeout=options.timeout) + command_process = pexpect.spawn(command, timeout=options.timeout, use_poll=True) except (pexpect.ExceptionPexpect, OSError) as exc: raise RuntimeError(f"Command: {command}") from exc From dd347dd8fbb9aa3fa777e358ee71cba56038ecae Mon Sep 17 00:00:00 2001 From: Dmitry Anurin Date: Wed, 11 Oct 2023 11:10:58 +0300 Subject: [PATCH 060/274] Added unit to logs getter Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/hosting/docker_host.py | 1 + src/frostfs_testlib/hosting/interfaces.py | 1 + 2 files changed, 2 insertions(+) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index e2bc949..ffc2082 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -217,6 +217,7 @@ class DockerHost(Host): message_regex: str, since: Optional[datetime] = None, until: Optional[datetime] = None, + unit: Optional[str] = None, ) -> bool: client = self._get_docker_client() for service_config in self._config.services: diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index b4f67fb..48344cc 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -257,6 +257,7 @@ class Host(ABC): message_regex: str, since: Optional[datetime] = None, until: Optional[datetime] = None, + unit: Optional[str] = None, ) -> bool: """Checks logs on host for specified message regex. 
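
A minimal usage sketch for the log helpers above. The `host` object, the five-minute
window and the "frostfs-storage" unit name are illustrative assumptions, not part of
the patches:

    from datetime import datetime, timedelta, timezone

    # `host` is any Host implementation (e.g. DockerHost); times must be in UTC
    # per the interface docstring.
    since = datetime.now(timezone.utc) - timedelta(minutes=5)
    if host.is_message_in_logs(r"panic|fatal", since=since, unit="frostfs-storage"):
        raise AssertionError("Found critical entries in storage node logs")
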
From 1c3bbe26f72a7e66199037e7b524ea27033a7571 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 17 Oct 2023 17:45:23 +0300 Subject: [PATCH 061/274] [#98] Small dependency cleanup Signed-off-by: Andrey Berezin --- pyproject.toml | 2 +- requirements.txt | 1 - src/frostfs_testlib/plugins/__init__.py | 8 +------- 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 778e2fc..bf65d15 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,7 @@ keywords = ["frostfs", "test"] dependencies = [ "allure-python-commons>=2.13.2", "docker>=4.4.0", - "importlib_metadata>=5.0; python_version < '3.10'", + "pyyaml==6.0.1", "neo-mamba==1.0.0", "paramiko>=2.10.3", "pexpect>=4.8.0", diff --git a/requirements.txt b/requirements.txt index 1fdf844..32e604f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,5 @@ allure-python-commons==2.13.2 docker==4.4.0 -importlib_metadata==5.0.0 neo-mamba==1.0.0 paramiko==2.10.3 pexpect==4.8.0 diff --git a/src/frostfs_testlib/plugins/__init__.py b/src/frostfs_testlib/plugins/__init__.py index fcd7acc..6914b9b 100644 --- a/src/frostfs_testlib/plugins/__init__.py +++ b/src/frostfs_testlib/plugins/__init__.py @@ -1,12 +1,6 @@ -import sys +from importlib.metadata import entry_points from typing import Any -if sys.version_info < (3, 10): - # On Python prior 3.10 we need to use backport of entry points - from importlib_metadata import entry_points -else: - from importlib.metadata import entry_points - def load_plugin(plugin_group: str, name: str) -> Any: """Loads plugin using entry point specification. From cff5db5a6786963df34be2afc58e956d37db6a6f Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Wed, 18 Oct 2023 10:55:22 +0300 Subject: [PATCH 062/274] Change func parsing netmap Signed-off-by: Dmitriy Zayakin --- .../dataclasses/storage_object_info.py | 5 +- src/frostfs_testlib/utils/cli_utils.py | 62 ++++++++++++------- 2 files changed, 42 insertions(+), 25 deletions(-) diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index 21a820f..f7d51db 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -29,14 +29,15 @@ class StorageObjectInfo(ObjectRef): class NodeNetmapInfo: node_id: str = None node_status: str = None - node_data_ip: str = None + node_data_ips: list[str] = None cluster_name: str = None continent: str = None country: str = None country_code: str = None - external_address: str = None + external_address: list[str] = None location: str = None node: str = None + price: int = None sub_div: str = None sub_div_code: int = None un_locode: str = None diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 5bd4695..0fa6cde 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -8,6 +8,7 @@ Helper functions to use with `frostfs-cli`, `neo-go` and other CLIs. import csv import json import logging +import re import subprocess import sys from contextlib import suppress @@ -138,32 +139,47 @@ def log_command_execution(cmd: str, output: Union[str, TypedDict]) -> None: def parse_netmap_output(output: str) -> list[NodeNetmapInfo]: """ - The cli command will return something like. 
-
-    Epoch: 240
-    Node 1: 01234 ONLINE /ip4/10.10.10.10/tcp/8080
-    Continent: Europe
-    Country: Russia
-    CountryCode: RU
-    ExternalAddr: /ip4/10.10.11.18/tcp/8080
-    Location: Moskva
-    Node: 10.10.10.12
-    Price: 5
-    SubDiv: Moskva
-    SubDivCode: MOW
-    UN-LOCODE: RU MOW
-    role: alphabet
-
     The code will parse each line and return each node as dataclass.
     """
-    netmap_list = output.split("Node ")[1:]
-    dataclass_list = []
-    for node in netmap_list:
-        node = node.replace("\t", "").split("\n")
-        node = *node[0].split(" ")[1:-1], *[row.split(": ")[-1] for row in node[1:-1]]
-        dataclass_list.append(NodeNetmapInfo(*node))
+    netmap_nodes = output.split("Node ")[1:]
+    dataclasses_netmap = []
+    result_netmap = {}
 
-    return dataclass_list
+    regexes = {
+        "node_id": r"\d+: (?P<node_id>\w+)",
+        "node_data_ips": r"(?P<node_data_ips>/ip4/.+?)$",
+        "node_status": r"(?P<node_status>ONLINE|OFFLINE)",
+        "cluster_name": r"ClusterName: (?P<cluster_name>\w+)",
+        "continent": r"Continent: (?P<continent>\w+)",
+        "country": r"Country: (?P<country>\w+)",
+        "country_code": r"CountryCode: (?P<country_code>\w+)",
+        "external_address": r"ExternalAddr: (?P<external_address>/ip[4].+?)$",
+        "location": r"Location: (?P<location>\w+.*)",
+        "node": r"Node: (?P<node>\d+\.\d+\.\d+\.\d+)",
+        "price": r"Price: (?P<price>\d+)",
+        "sub_div": r"SubDiv: (?P<sub_div>.*)",
+        "sub_div_code": r"SubDivCode: (?P<sub_div_code>\w+)",
+        "un_locode": r"UN-LOCODE: (?P<un_locode>\w+.*)",
+        "role": r"role: (?P<role>\w+)",
+    }
+
+    for node in netmap_nodes:
+        for key, regex in regexes.items():
+            search_result = re.search(regex, node, flags=re.MULTILINE)
+            # Field may be missing for some nodes; store None instead of failing.
+            if search_result is None:
+                result_netmap[key] = None
+                continue
+            if key == "node_data_ips":
+                result_netmap[key] = search_result[key].strip().split(" ")
+                continue
+            if key == "external_address":
+                result_netmap[key] = search_result[key].strip().split(",")
+                continue
+            result_netmap[key] = search_result[key].strip()
+
+        dataclasses_netmap.append(NodeNetmapInfo(**result_netmap))
+
+    return dataclasses_netmap
 
 def parse_cmd_table(output: str, delimiter="|") -> list[dict[str, str]]:

From e1f3444e9252f9ead667841d83957b816134ba2e Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Fri, 20 Oct 2023 18:08:22 +0300
Subject: [PATCH 063/274] [#100] Add new method for logs gathering

Signed-off-by: Andrey Berezin
---
 src/frostfs_testlib/hosting/docker_host.py | 24 +++++++++++++++++++
 src/frostfs_testlib/hosting/interfaces.py | 28 ++++++++++++++++++----
 2 files changed, 48 insertions(+), 4 deletions(-)

diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py
index ffc2082..289c94d 100644
--- a/src/frostfs_testlib/hosting/docker_host.py
+++ b/src/frostfs_testlib/hosting/docker_host.py
@@ -212,6 +212,30 @@ class DockerHost(Host):
         with open(file_path, "wb") as file:
             file.write(logs)
 
+    def get_filtered_logs(
+        self,
+        filter_regex: str,
+        since: Optional[datetime] = None,
+        until: Optional[datetime] = None,
+        unit: Optional[str] = None,
+    ) -> str:
+        client = self._get_docker_client()
+        filtered_logs = ""
+        for service_config in self._config.services:
+            container_name = self._get_service_attributes(service_config.name).container_name
+            try:
+                # Keep the raw container logs in a local variable so the
+                # matches already accumulated in filtered_logs are not overwritten.
+                logs = client.logs(container_name, since=since, until=until)
+            except HTTPError as exc:
+                logger.info(f"Got exception while dumping logs of '{container_name}': {exc}")
+                continue
+
+            matches = re.findall(filter_regex, logs, re.IGNORECASE + re.MULTILINE)
+            found = list(matches)
+            if found:
+                filtered_logs += f"{container_name}:\n{os.linesep.join(found)}"
+
+        return filtered_logs
+
     def is_message_in_logs(
         self,
         message_regex: str,
diff --git 
a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py
index 48344cc..4c94ca0 100644
--- a/src/frostfs_testlib/hosting/interfaces.py
+++ b/src/frostfs_testlib/hosting/interfaces.py
@@ -115,7 +115,6 @@ class Host(ABC):
             service_name: Name of the service to restart.
         """
 
-    @abstractmethod
     def get_data_directory(self, service_name: str) -> str:
         """
@@ -126,7 +125,6 @@ class Host(ABC):
             service_name: Name of storage node service.
         """
 
-    @abstractmethod
     def wait_success_suspend_process(self, process_name: str) -> None:
         """Search for a service ID by its name and stop the process
@@ -251,6 +249,27 @@ class Host(ABC):
             filter_regex: regex to filter output
         """
 
+    @abstractmethod
+    def get_filtered_logs(
+        self,
+        filter_regex: str,
+        since: Optional[datetime] = None,
+        until: Optional[datetime] = None,
+        unit: Optional[str] = None,
+    ) -> str:
+        """Get logs from host filtered by regex.
+
+        Args:
+            filter_regex: regex filter for logs.
+            since: If set, limits the time from which logs should be collected. Must be in UTC.
+            until: If set, limits the time until which logs should be collected. Must be in UTC.
+            unit: If set, limits the search to the given service unit.
+
+        Returns:
+            Found entries as str if any found.
+            Empty string otherwise.
+        """
+
     @abstractmethod
     def is_message_in_logs(
         self,
@@ -270,10 +289,11 @@ class Host(ABC):
             True if message found in logs in the given time frame.
             False otherwise.
         """
-
     @abstractmethod
-    def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None:
+    def wait_for_service_to_be_in_state(
+        self, systemd_service_name: str, expected_state: str, timeout: int
+    ) -> None:
         """
         Waits for service to be in specified state.

From 0c3bb20af5c353887cec98f6c0dc203f2b3ed26c Mon Sep 17 00:00:00 2001
From: Dmitriy Zayakin
Date: Tue, 24 Oct 2023 13:57:11 +0300
Subject: [PATCH 064/274] Add method to interfaces

Signed-off-by: Dmitriy Zayakin
---
 src/frostfs_testlib/storage/cluster.py | 35 +++++++++++++++++++
 .../storage/dataclasses/frostfs_services.py | 8 +++--
 .../dataclasses/storage_object_info.py | 11 ++++++
 3 files changed, 52 insertions(+), 2 deletions(-)

diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py
index 0e24ebb..7a48a1d 100644
--- a/src/frostfs_testlib/storage/cluster.py
+++ b/src/frostfs_testlib/storage/cluster.py
@@ -17,6 +17,7 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import (
     StorageNode,
 )
 from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
+from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
 from frostfs_testlib.storage.service_registry import ServiceRegistry
 
 reporter = get_reporter()
@@ -121,6 +122,40 @@ class ClusterNode:
             config.attributes[ConfigAttributes.SERVICE_NAME] for config in self.host.config.services
         ]
 
+    def get_all_interfaces(self) -> dict[str, str]:
+        return self.host.config.interfaces
+
+    def get_interface(self, interface: Interfaces) -> str:
+        return self.host.config.interfaces[interface.value]
+
+    def get_data_interfaces(self) -> list[str]:
+        return [
+            ip_address
+            for name_interface, ip_address in self.host.config.interfaces.items()
+            if "data" in name_interface
+        ]
+
+    def get_data_interface(self, search_interface: str) -> list[str]:
+        return [
+            self.host.config.interfaces[interface]
+            for interface in self.host.config.interfaces.keys()
+            if search_interface == interface
+        ]
+
+    def get_internal_interfaces(self) -> list[str]:
+        return [
+            ip_address
+            for name_interface, ip_address in 
self.host.config.interfaces.items() + if "internal" in name_interface + ] + + def get_internal_interface(self, search_internal: str) -> list[str]: + return [ + self.host.config.interfaces[interface] + for interface in self.host.config.interfaces.keys() + if search_internal == interface + ] + class Cluster: """ diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index ac2885b..9e6783c 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -110,6 +110,10 @@ class MorphChain(NodeBase): def label(self) -> str: return f"{self.name}: {self.get_endpoint()}" + def get_http_endpoint(self) -> str: + return self._get_attribute("http_endpoint") + + class StorageNode(NodeBase): """ Class represents storage node in a storage cluster @@ -149,10 +153,10 @@ class StorageNode(NodeBase): def get_data_directory(self) -> str: return self.host.get_data_directory(self.name) - + def get_http_hostname(self) -> str: return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME) - + def get_s3_hostname(self) -> str: return self._get_attribute(ConfigAttributes.S3_HOSTNAME) diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index f7d51db..d670d8e 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -1,6 +1,9 @@ from dataclasses import dataclass +from enum import Enum from typing import Optional +from frostfs_testlib.testing.readable import HumanReadableEnum + @dataclass class ObjectRef: @@ -42,3 +45,11 @@ class NodeNetmapInfo: sub_div_code: int = None un_locode: str = None role: str = None + + +class Interfaces(HumanReadableEnum): + DATA_O: str = "data0" + DATA_1: str = "data1" + MGMT: str = "mgmt" + INTERNAL_0: str = "internal0" + INTERNAL_1: str = "internal1" From b1a3d740e99e7d7e9ec7fdcb939c3b0572ce989a Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 25 Oct 2023 15:57:38 +0300 Subject: [PATCH 065/274] [#102] Updates for failover Signed-off-by: Andrey Berezin --- pyproject.toml | 2 +- src/frostfs_testlib/storage/cluster.py | 36 +++++++++++++++++++ .../controllers/cluster_state_controller.py | 4 +-- .../storage/dataclasses/frostfs_services.py | 3 ++ .../storage/dataclasses/node_base.py | 14 +++++--- 5 files changed, 51 insertions(+), 8 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index bf65d15..3178bbe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,7 +22,7 @@ dependencies = [ "neo-mamba==1.0.0", "paramiko>=2.10.3", "pexpect>=4.8.0", - "requests>=2.28.0", + "requests==2.28.1", "docstring_parser>=0.15", "testrail-api>=1.12.0", "pytest==7.1.2", diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 7a48a1d..fa4ee0a 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -208,6 +208,42 @@ class Cluster: def morph_chain(self) -> list[MorphChain]: return self.services(MorphChain) + def nodes(self, services: list[ServiceClass]) -> list[ClusterNode]: + """ + Resolve which cluster nodes hosting the specified services. + + Args: + services: list of services to resolve hosting cluster nodes. + + Returns: + list of cluster nodes which host specified services. 
+        """
+
+        cluster_nodes = set()
+        for service in services:
+            cluster_nodes.update(
+                [node for node in self.cluster_nodes if node.service(type(service)) == service]
+            )
+
+        return list(cluster_nodes)
+
+    def node(self, service: ServiceClass) -> ClusterNode:
+        """
+        Resolve single cluster node hosting the specified service.
+
+        Args:
+            service: service to resolve the hosting cluster node for.
+
+        Returns:
+            cluster node which hosts the specified service.
+        """
+
+        nodes = [node for node in self.cluster_nodes if node.service(type(service)) == service]
+        if not len(nodes):
+            raise RuntimeError(f"Cannot find service {service} on any node")
+
+        return nodes[0]
+
     def services(self, service_type: type[ServiceClass]) -> list[ServiceClass]:
         """
         Get all services in a cluster of specified type.
diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py
index c6391f5..7304f5d 100644
--- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py
+++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py
@@ -41,10 +41,10 @@ class ClusterStateController:
         provider = SshConnectionProvider()
         provider.drop(node.host_ip)
 
+        self.stopped_nodes.append(node)
         with reporter.step(f"Stop host {node.host.config.address}"):
             node.host.stop_host(mode=mode)
             wait_for_host_offline(self.shell, node.storage_node)
-        self.stopped_nodes.append(node)
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Shutdown whole cluster")
@@ -136,8 +136,8 @@ class ClusterStateController:
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Stop storage service on {node}")
     def stop_storage_service(self, node: ClusterNode):
-        node.storage_node.stop_service()
         self.stopped_storage_nodes.append(node)
+        node.storage_node.stop_service()
 
    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
    @reporter.step_deco("Stop all {service_type} services")
diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py
index 9e6783c..6413ded 100644
--- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py
+++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py
@@ -145,6 +145,9 @@ class StorageNode(NodeBase):
     def get_shard_config_path(self) -> str:
         return self._get_attribute(ConfigAttributes.SHARD_CONFIG_PATH)
 
+    def get_shards_config(self) -> tuple[str, dict]:
+        return self.get_config(self.get_shard_config_path())
+
     def get_control_endpoint(self) -> str:
         return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT)
 
diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py
index 3b1964c..5352080 100644
--- a/src/frostfs_testlib/storage/dataclasses/node_base.py
+++ b/src/frostfs_testlib/storage/dataclasses/node_base.py
@@ -1,6 +1,6 @@
 from abc import abstractmethod
 from dataclasses import dataclass
-from typing import Optional, Tuple, TypedDict, TypeVar
+from typing import Optional, TypedDict, TypeVar
 
 import yaml
 
@@ -103,8 +103,10 @@ class NodeBase(HumanReadableABC):
             ConfigAttributes.WALLET_CONFIG,
         )
 
-    def get_config(self) -> Tuple[str, dict]:
-        config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)
+    def get_config(self, config_file_path: Optional[str] = None) -> tuple[str, dict]:
+        if config_file_path is None:
+            config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)
+
         shell = self.host.get_shell()
 
        result = 
shell.exec(f"cat {config_file_path}") @@ -113,8 +115,10 @@ class NodeBase(HumanReadableABC): config = yaml.safe_load(config_text) return config_file_path, config - def save_config(self, new_config: dict) -> None: - config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) + def save_config(self, new_config: dict, config_file_path: Optional[str] = None) -> None: + if config_file_path is None: + config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) + shell = self.host.get_shell() config_str = yaml.dump(new_config) From f4111a1374e8c28f3908de2f59a7bcb3ae1c5bc6 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 26 Oct 2023 13:34:42 +0300 Subject: [PATCH 066/274] [#103] Add host_status method to Host Signed-off-by: Andrey Berezin --- src/frostfs_testlib/hosting/docker_host.py | 22 +++++++++++++++---- src/frostfs_testlib/hosting/interfaces.py | 11 ++++++++++ src/frostfs_testlib/storage/constants.py | 1 + .../storage/dataclasses/node_base.py | 3 +++ 4 files changed, 33 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 289c94d..d582418 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -11,7 +11,7 @@ import docker from requests import HTTPError from frostfs_testlib.hosting.config import ParsedAttributes -from frostfs_testlib.hosting.interfaces import DiskInfo, Host +from frostfs_testlib.hosting.interfaces import DiskInfo, Host, HostStatus from frostfs_testlib.shell import LocalShell, Shell, SSHShell from frostfs_testlib.shell.command_inspectors import SudoInspector @@ -87,6 +87,15 @@ class DockerHost(Host): for service_config in self._config.services: self.start_service(service_config.name) + def get_host_status(self) -> HostStatus: + # We emulate host status by checking all services. + for service_config in self._config.services: + state = self._get_container_state(service_config.name) + if state != "running": + return HostStatus.OFFLINE + + return HostStatus.ONLINE + def stop_host(self) -> None: # We emulate stopping machine by stopping all services # As an alternative we can probably try to stop docker service... @@ -293,11 +302,16 @@ class DockerHost(Host): # To speed things up, we break timeout in smaller iterations and check container state # several times. 
This way waiting stops as soon as container reaches the expected state for _ in range(iterations): - container = self._get_container_by_name(container_name) - logger.debug(f"Current container state\n:{json.dumps(container, indent=2)}") + state = self._get_container_state(container_name) - if container and container["State"] == expected_state: + if state == expected_state: return time.sleep(iteration_wait_time) raise RuntimeError(f"Container {container_name} is not in {expected_state} state.") + + def _get_container_state(self, container_name: str) -> str: + container = self._get_container_by_name(container_name) + logger.debug(f"Current container state\n:{json.dumps(container, indent=2)}") + + return container.get("State", None) diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 4c94ca0..4388791 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -4,6 +4,13 @@ from typing import Optional from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig from frostfs_testlib.shell.interfaces import Shell +from frostfs_testlib.testing.readable import HumanReadableEnum + + +class HostStatus(HumanReadableEnum): + ONLINE = "Online" + OFFLINE = "Offline" + UNKNOWN = "Unknown" class DiskInfo(dict): @@ -79,6 +86,10 @@ class Host(ABC): def start_host(self) -> None: """Starts the host machine.""" + @abstractmethod + def get_host_status(self) -> HostStatus: + """Check host status.""" + @abstractmethod def stop_host(self, mode: str) -> None: """Stops the host machine. diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index dbaac5a..2284ce3 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -10,6 +10,7 @@ class ConfigAttributes: ENDPOINT_DATA_0 = "endpoint_data0" ENDPOINT_DATA_1 = "endpoint_data1" ENDPOINT_INTERNAL = "endpoint_internal0" + ENDPOINT_PROMETHEUS = "endpoint_prometheus" CONTROL_ENDPOINT = "control_endpoint" UN_LOCODE = "un_locode" HTTP_HOSTNAME = "http_hostname" diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 5352080..ecfe61c 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -64,6 +64,9 @@ class NodeBase(HumanReadableABC): def service_healthcheck(self) -> bool: """Service healthcheck.""" + def get_metrics_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS) + def stop_service(self): with reporter.step(f"Stop {self.name} service on {self.host.config.address}"): self.host.stop_service(self.name) From 8a360683aeb770c3174e695105ab8bc09abee7d3 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 26 Oct 2023 17:31:33 +0300 Subject: [PATCH 067/274] [#104] Add mask/unmask for services Signed-off-by: Andrey Berezin --- src/frostfs_testlib/hosting/docker_host.py | 8 +++ src/frostfs_testlib/hosting/interfaces.py | 20 ++++++ src/frostfs_testlib/steps/node_management.py | 63 +------------------ .../controllers/cluster_state_controller.py | 14 +++-- .../storage/dataclasses/node_base.py | 9 ++- 5 files changed, 45 insertions(+), 69 deletions(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index d582418..0e4ea11 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -126,6 +126,14 @@ 
class DockerHost(Host):
         timeout=service_attributes.stop_timeout,
         )
 
+    def mask_service(self, service_name: str) -> None:
+        # Not required for Docker
+        return
+
+    def unmask_service(self, service_name: str) -> None:
+        # Not required for Docker
+        return
+
     def wait_success_suspend_process(self, service_name: str):
         raise NotImplementedError("Not supported for docker")
 
diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py
index 4388791..84b7911 100644
--- a/src/frostfs_testlib/hosting/interfaces.py
+++ b/src/frostfs_testlib/hosting/interfaces.py
@@ -118,6 +118,26 @@ class Host(ABC):
             service_name: Name of the service to stop.
         """
 
+    @abstractmethod
+    def mask_service(self, service_name: str) -> None:
+        """Prevent the service from being started by any activity by masking it.
+
+        The service must be hosted on this host.
+
+        Args:
+            service_name: Name of the service to mask.
+        """
+
+    @abstractmethod
+    def unmask_service(self, service_name: str) -> None:
+        """Allow the service to be started by any activity by unmasking it.
+
+        The service must be hosted on this host.
+
+        Args:
+            service_name: Name of the service to unmask.
+        """
+
     @abstractmethod
     def restart_service(self, service_name: str) -> None:
         """Restarts the service with specified name and waits until it starts.
diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py
index 4b46b62..9c0c6b0 100644
--- a/src/frostfs_testlib/steps/node_management.py
+++ b/src/frostfs_testlib/steps/node_management.py
@@ -15,8 +15,7 @@ from frostfs_testlib.resources.cli import (
 )
 from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
 from frostfs_testlib.shell import Shell
-from frostfs_testlib.steps.epoch import tick_epoch
-from frostfs_testlib.steps.epoch import wait_for_epochs_align
+from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align
 from frostfs_testlib.storage.cluster import Cluster, StorageNode
 from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
 from frostfs_testlib.utils import datetime_utils
@@ -41,44 +40,6 @@ class HealthStatus:
         return HealthStatus(network, health)
 
-@reporter.step_deco("Stop random storage nodes")
-def stop_random_storage_nodes(number: int, nodes: list[StorageNode]) -> list[StorageNode]:
-    """
-    Shuts down the given number of randomly selected storage nodes.
-    Args:
-        number: the number of storage nodes to stop
-        nodes: the list of storage nodes to stop
-    Returns:
-        the list of nodes that were stopped
-    """
-    nodes_to_stop = random.sample(nodes, number)
-    for node in nodes_to_stop:
-        node.stop_service()
-    return nodes_to_stop
-
-
-@reporter.step_deco("Start storage node")
-def start_storage_nodes(nodes: list[StorageNode]) -> None:
-    """
-    The function starts specified storage nodes.
-    Args:
-        nodes: the list of nodes to start
-    """
-    for node in nodes:
-        node.start_service()
-
-
-@reporter.step_deco("Stop storage node")
-def stop_storage_nodes(nodes: list[StorageNode]) -> None:
-    """
-    The function starts specified storage nodes. 
- Args: - nodes: the list of nodes to start - """ - for node in nodes: - node.stop_service() - - @reporter.step_deco("Get Locode from random storage node") def get_locode_from_random_node(cluster: Cluster) -> str: node = random.choice(cluster.services(StorageNode)) @@ -329,25 +290,3 @@ def _run_control_command(node: StorageNode, command: str) -> None: f"--wallet {wallet_path} --config {wallet_config_path}" ) return result.stdout - - -@reporter.step_deco("Start services s3gate ") -def start_s3gates(cluster: Cluster) -> None: - """ - The function starts specified storage nodes. - Args: - cluster: cluster instance under test - """ - for gate in cluster.services(S3Gate): - gate.start_service() - - -@reporter.step_deco("Stop services s3gate ") -def stop_s3gates(cluster: Cluster) -> None: - """ - The function starts specified storage nodes. - Args: - cluster: cluster instance under test - """ - for gate in cluster.services(S3Gate): - gate.stop_service() diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 7304f5d..c18b8d8 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -135,9 +135,9 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop storage service on {node}") - def stop_storage_service(self, node: ClusterNode): + def stop_storage_service(self, node: ClusterNode, mask: bool = True): self.stopped_storage_nodes.append(node) - node.storage_node.stop_service() + node.storage_node.stop_service(mask) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop all {service_type} services") @@ -171,9 +171,11 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop {service_type} service on {node}") - def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]): + def stop_service_of_type( + self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True + ): service = node.service(service_type) - service.stop_service() + service.stop_service(mask) self.stopped_services.add(service) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @@ -207,8 +209,8 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop s3 gate on {node}") - def stop_s3_gate(self, node: ClusterNode): - node.s3_gate.stop_service() + def stop_s3_gate(self, node: ClusterNode, mask: bool = True): + node.s3_gate.stop_service(mask) self.stopped_s3_gates.append(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index ecfe61c..8708520 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -57,6 +57,9 @@ class NodeBase(HumanReadableABC): return self._process_name def start_service(self): + with reporter.step(f"Unmask {self.name} service on {self.host.config.address}"): + self.host.unmask_service(self.name) + with reporter.step(f"Start {self.name} service on {self.host.config.address}"): self.host.start_service(self.name) @@ -67,7 +70,11 @@ class NodeBase(HumanReadableABC): def get_metrics_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS) - def stop_service(self): + def 
stop_service(self, mask: bool = True): + if mask: + with reporter.step(f"Mask {self.name} service on {self.host.config.address}"): + self.host.mask_service(self.name) + with reporter.step(f"Stop {self.name} service on {self.host.config.address}"): self.host.stop_service(self.name) From 3af4dfd977cb60744060a51c184b3b48400965ea Mon Sep 17 00:00:00 2001 From: "m.malygina" Date: Fri, 27 Oct 2023 10:56:27 +0300 Subject: [PATCH 068/274] multipart scenario Signed-off-by: m.malygina --- src/frostfs_testlib/load/load_config.py | 16 +++++++++++++--- src/frostfs_testlib/load/load_metrics.py | 1 + src/frostfs_testlib/load/load_report.py | 1 + 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 678fc38..a5d8535 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -19,6 +19,7 @@ class LoadScenario(Enum): gRPC_CAR = "grpc_car" S3 = "s3" S3_CAR = "s3_car" + S3_MULTIPART = "s3_multipart" HTTP = "http" VERIFY = "verify" LOCAL = "local" @@ -37,10 +38,11 @@ all_load_scenarios = [ LoadScenario.S3_CAR, LoadScenario.gRPC_CAR, LoadScenario.LOCAL, + LoadScenario.S3_MULTIPART ] all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY] -constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP, LoadScenario.LOCAL] +constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP, LoadScenario.LOCAL, LoadScenario.S3_MULTIPART] constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR] grpc_preset_scenarios = [ @@ -49,7 +51,7 @@ grpc_preset_scenarios = [ LoadScenario.gRPC_CAR, LoadScenario.LOCAL, ] -s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR] +s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART] @dataclass @@ -172,7 +174,7 @@ class LoadParams: k6_url: Optional[str] = None # No ssl verification flag no_verify_ssl: Optional[bool] = metadata_field( - [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.VERIFY, LoadScenario.HTTP], + [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.VERIFY, LoadScenario.HTTP], "no-verify-ssl", "NO_VERIFY_SSL", False, @@ -258,6 +260,14 @@ class LoadParams: constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True ) + # Multipart + # Number of parts to upload in parallel + writers_multipart: Optional[int] = metadata_field( + [LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True + ) + # part size must be greater than (5 MB) + write_object_part_size: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False) + # Period of time to apply the rate value. 
time_unit: Optional[str] = metadata_field( constant_arrival_rate_scenarios, None, "TIME_UNIT", False diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py index 6c201ec..474a96b 100644 --- a/src/frostfs_testlib/load/load_metrics.py +++ b/src/frostfs_testlib/load/load_metrics.py @@ -196,6 +196,7 @@ def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> Metr LoadScenario.HTTP: GrpcMetrics, LoadScenario.S3: S3Metrics, LoadScenario.S3_CAR: S3Metrics, + LoadScenario.S3_MULTIPART: S3Metrics, LoadScenario.VERIFY: VerifyMetrics, LoadScenario.LOCAL: LocalMetrics, } diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index ec6d539..b648bc2 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -92,6 +92,7 @@ class LoadReport: model_map = { LoadScenario.gRPC: "closed model", LoadScenario.S3: "closed model", + LoadScenario.S3_MULTIPART: "closed model", LoadScenario.HTTP: "closed model", LoadScenario.gRPC_CAR: "open model", LoadScenario.S3_CAR: "open model", From f3c160f313c5c5926643ec5b81400b56705705ed Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 27 Oct 2023 14:10:01 +0300 Subject: [PATCH 069/274] [#107] Add passwd change protection for local runner --- src/frostfs_testlib/load/runners.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index 489ddcd..b65f129 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -285,6 +285,7 @@ class LocalRunner(RunnerBase): self.cluster_state_controller = cluster_state_controller self.file_keeper = file_keeper self.loaders = [NodeLoader(node) for node in nodes_under_load] + self.nodes_under_load = nodes_under_load @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step_deco("Preparation steps") @@ -301,6 +302,7 @@ class LocalRunner(RunnerBase): with reporter.step("Allow storage user to login into system"): shell.exec(f"sudo chsh -s /bin/bash {STORAGE_USER_NAME}") + shell.exec("sudo chattr +i /etc/passwd") with reporter.step("Update limits.conf"): limits_path = "/etc/security/limits.conf" @@ -381,6 +383,13 @@ class LocalRunner(RunnerBase): for k6_instance in self.k6_instances: k6_instance.stop() + @reporter.step_deco("Restore passwd on {cluster_node}") + def restore_passwd_attr_on_node(cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + shell.exec("sudo chattr -i /etc/passwd") + + parallel(restore_passwd_attr_on_node, self.nodes_under_load) + self.cluster_state_controller.start_stopped_storage_services() self.cluster_state_controller.start_stopped_s3_gates() From 137fd2156145572b2af0fbbb4005f95314ac5d0a Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Fri, 27 Oct 2023 13:36:32 +0300 Subject: [PATCH 070/274] Add local shell and small fix Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/s3/aws_cli_client.py | 107 +++++++++--------- src/frostfs_testlib/steps/http/http_gate.py | 70 +++++++++--- .../controllers/cluster_state_controller.py | 9 +- src/frostfs_testlib/utils/cli_utils.py | 49 -------- 4 files changed, 113 insertions(+), 122 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 2e61679..dbece66 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -14,13 +14,15 @@ from frostfs_testlib.resources.common import ( 
S3_SYNC_WAIT_TIME, ) from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict +from frostfs_testlib.shell import CommandOptions +from frostfs_testlib.shell.local_shell import LocalShell # TODO: Refactor this code to use shell instead of _cmd_run -from frostfs_testlib.utils.cli_utils import _cmd_run, _configure_aws_cli +from frostfs_testlib.utils.cli_utils import _configure_aws_cli reporter = get_reporter() logger = logging.getLogger("NeoLogger") -LONG_TIMEOUT = 240 +command_options = CommandOptions(timeout=240) class AwsCliClient(S3ClientWrapper): @@ -34,10 +36,13 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Configure S3 client (aws cli)") def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: self.s3gate_endpoint = s3gate_endpoint + self.local_shell = LocalShell() try: _configure_aws_cli("aws configure", access_key_id, secret_access_key) - _cmd_run(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}") - _cmd_run(f"aws configure set retry_mode {RETRY_MODE}") + self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}") + self.local_shell.exec( + f"aws configure set retry_mode {RETRY_MODE}", + ) except Exception as err: raise RuntimeError("Error while configuring AwsCliClient") from err @@ -79,7 +84,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --grant-read {grant_read}" if location_constraint: cmd += f" --create-bucket-configuration LocationConstraint={location_constraint}" - _cmd_run(cmd) + self.local_shell.exec(cmd) sleep(S3_SYNC_WAIT_TIME) return bucket @@ -87,20 +92,20 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("List buckets S3") def list_buckets(self) -> list[str]: cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint}" - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout buckets_json = self._to_json(output) return [bucket["Name"] for bucket in buckets_json["Buckets"]] @reporter.step_deco("Delete bucket S3") def delete_bucket(self, bucket: str) -> None: cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}" - _cmd_run(cmd, LONG_TIMEOUT) + self.local_shell.exec(cmd, command_options) sleep(S3_SYNC_WAIT_TIME) @reporter.step_deco("Head bucket S3") def head_bucket(self, bucket: str) -> None: cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}" - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Put bucket versioning status") def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: @@ -109,7 +114,7 @@ class AwsCliClient(S3ClientWrapper): f"--versioning-configuration Status={status.value} " f"--endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Get bucket versioning status") def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: @@ -117,7 +122,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Status") @@ -130,7 +135,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} " f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Get 
bucket tagging") def get_bucket_tagging(self, bucket: str) -> list: @@ -138,7 +143,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("TagSet") @@ -148,7 +153,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Grants") @@ -158,7 +163,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("LocationConstraint") @@ -168,7 +173,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) obj_list = [obj["Key"] for obj in response.get("Contents", [])] @@ -182,7 +187,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) obj_list = [obj["Key"] for obj in response.get("Contents", [])] @@ -196,7 +201,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response if full_output else response.get("Versions", []) @@ -206,7 +211,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response if full_output else response.get("DeleteMarkers", []) @@ -245,7 +250,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --tagging-directive {tagging_directive}" if tagging: cmd += f" --tagging {tagging}" - _cmd_run(cmd, LONG_TIMEOUT) + self.local_shell.exec(cmd, command_options) return key @reporter.step_deco("Put object S3") @@ -288,7 +293,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --grant-full-control '{grant_full_control}'" if grant_read: cmd += f" --grant-read {grant_read}" - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) return response.get("VersionId") @@ -299,7 +304,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} " f"{version} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response @@ -320,7 +325,7 @@ class AwsCliClient(S3ClientWrapper): ) if object_range: cmd += f" --range bytes={object_range[0]}-{object_range[1]}" - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response if full_output else file_path @@ -331,7 +336,7 @@ class 
AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} " f"{version} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Grants") @@ -354,7 +359,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --grant-write {grant_write}" if grant_read: cmd += f" --grant-read {grant_read}" - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Grants") @@ -376,7 +381,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --grant-write {grant_write}" if grant_read: cmd += f" --grant-read {grant_read}" - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Delete objects S3") def delete_objects(self, bucket: str, keys: list[str]) -> dict: @@ -390,7 +395,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) sleep(S3_SYNC_WAIT_TIME) return response @@ -402,7 +407,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-object --bucket {bucket} " f"--key {key} {version} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout sleep(S3_SYNC_WAIT_TIME) return self._to_json(output) @@ -429,7 +434,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout sleep(S3_SYNC_WAIT_TIME) return self._to_json(output) @@ -462,7 +467,7 @@ class AwsCliClient(S3ClientWrapper): f"--key {key} {version} {parts} {part_number_str} --object-attributes {attrs} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) for attr in attributes: @@ -479,7 +484,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Policy") @@ -496,7 +501,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-bucket-policy --bucket {bucket} " f"--policy {dumped_policy} --endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Get bucket cors") def get_bucket_cors(self, bucket: str) -> dict: @@ -504,7 +509,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("CORSRules") @@ -514,7 +519,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Delete bucket cors") def delete_bucket_cors(self, bucket: str) -> None: @@ -522,7 +527,7 @@ class 
AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Delete bucket tagging") def delete_bucket_tagging(self, bucket: str) -> None: @@ -530,7 +535,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Put object retention") def put_object_retention( @@ -548,7 +553,7 @@ class AwsCliClient(S3ClientWrapper): ) if bypass_governance_retention is not None: cmd += " --bypass-governance-retention" - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Put object legal hold") def put_object_legal_hold( @@ -564,7 +569,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-object-legal-hold --bucket {bucket} --key {key} " f"{version} --legal-hold '{legal_hold}' --endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Put object tagging") def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: @@ -574,7 +579,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} " f"--tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Get object tagging") def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: @@ -583,7 +588,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key {key} " f"{version} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("TagSet") @@ -593,7 +598,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " f"--key {key} --endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Sync directory S3") def sync( @@ -613,7 +618,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" {key}={value}" if acl: cmd += f" --acl {acl}" - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout return self._to_json(output) @reporter.step_deco("CP directory S3") @@ -634,7 +639,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" {key}={value}" if acl: cmd += f" --acl {acl}" - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout return self._to_json(output) @reporter.step_deco("Create multipart upload S3") @@ -643,7 +648,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " f"--key {key} --endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" @@ -656,7 +661,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Uploads") @@ -666,7 +671,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api 
abort-multipart-upload --bucket {bucket} " f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Upload part S3") def upload_part( @@ -677,7 +682,7 @@ class AwsCliClient(S3ClientWrapper): f"--upload-id {upload_id} --part-number {part_num} --body {filepath} " f"--endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) assert response.get("ETag"), f"Expected ETag in response:\n{response}" return response["ETag"] @@ -691,7 +696,7 @@ class AwsCliClient(S3ClientWrapper): f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} " f"--endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) assert response.get("CopyPartResult", []).get( "ETag" @@ -705,7 +710,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) assert response.get("Parts"), f"Expected Parts in response:\n{response}" @@ -727,7 +732,7 @@ class AwsCliClient(S3ClientWrapper): f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} " f"--endpoint-url {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Put object lock configuration") def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: @@ -735,7 +740,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout return self._to_json(output) @reporter.step_deco("Get object lock configuration") @@ -744,7 +749,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("ObjectLockConfiguration") diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py index 8080689..2b70d6c 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -12,12 +12,13 @@ import requests from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE -from frostfs_testlib.s3.aws_cli_client import LONG_TIMEOUT +from frostfs_testlib.s3.aws_cli_client import command_options from frostfs_testlib.shell import Shell +from frostfs_testlib.shell.local_shell import LocalShell from frostfs_testlib.steps.cli.object import get_object from frostfs_testlib.steps.storage_policy import get_nodes_without_object from frostfs_testlib.storage.cluster import StorageNode -from frostfs_testlib.utils.cli_utils import _cmd_run +from frostfs_testlib.testing.test_control import retry from frostfs_testlib.utils.file_utils import get_file_hash reporter = get_reporter() @@ -25,6 +26,7 @@ reporter = get_reporter() logger = logging.getLogger("NeoLogger") ASSETS_DIR = os.getenv("ASSETS_DIR", 
"TemporaryDir/") +local_shell = LocalShell() @reporter.step_deco("Get via HTTP Gate") @@ -51,7 +53,9 @@ def get_via_http_gate( else: request = f"{endpoint}{request_path}" - resp = requests.get(request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False) + resp = requests.get( + request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False + ) if not resp.ok: raise Exception( @@ -72,7 +76,9 @@ def get_via_http_gate( @reporter.step_deco("Get via Zip HTTP Gate") -def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300): +def get_via_zip_http_gate( + cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300 +): """ This function gets given object from HTTP gate cid: container id to get object from @@ -130,7 +136,9 @@ def get_via_http_gate_by_attribute( else: request = f"{endpoint}{request_path}" - resp = requests.get(request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname}) + resp = requests.get( + request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname} + ) if not resp.ok: raise Exception( @@ -165,7 +173,9 @@ def upload_via_http_gate( request = f"{endpoint}/upload/{cid}" files = {"upload_file": open(path, "rb")} body = {"filename": path} - resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout, verify=False) + resp = requests.post( + request, files=files, data=body, headers=headers, timeout=timeout, verify=False + ) if not resp.ok: raise Exception( @@ -223,16 +233,16 @@ def upload_via_http_gate_curl( large_object = is_object_large(filepath) if large_object: # pre-clean - _cmd_run("rm pipe -f") + local_shell.exec("rm pipe -f") files = f"file=@pipe;filename={os.path.basename(filepath)}" cmd = f"mkfifo pipe;cat {filepath} > pipe & curl -k --no-buffer -F '{files}' {attributes} {request}" - output = _cmd_run(cmd, LONG_TIMEOUT) + output = local_shell.exec(cmd, command_options) # clean up pipe - _cmd_run("rm pipe") + local_shell.exec("rm pipe") else: files = f"file=@{filepath};filename={os.path.basename(filepath)}" cmd = f"curl -k -F '{files}' {attributes} {request}" - output = _cmd_run(cmd) + output = local_shell.exec(cmd) if error_pattern: match = error_pattern.casefold() in str(output).casefold() @@ -245,6 +255,7 @@ def upload_via_http_gate_curl( return oid_re.group(1) +@retry(max_attempts=3, sleep_interval=1) @reporter.step_deco("Get via HTTP Gate using Curl") def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> str: """ @@ -257,8 +268,8 @@ def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> request = f"{endpoint}/get/{cid}/{oid}" file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}") - cmd = f"curl -k -H \"Host: {http_hostname}\" {request} > {file_path}" - _cmd_run(cmd) + cmd = f'curl -k -H "Host: {http_hostname}" {request} > {file_path}' + local_shell.exec(cmd) return file_path @@ -271,7 +282,11 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"): @reporter.step_deco("Try to get object and expect error") def try_to_get_object_and_expect_error( - cid: str, oid: str, error_pattern: str, endpoint: str, http_hostname: str, + cid: str, + oid: str, + error_pattern: str, + endpoint: str, + http_hostname: str, ) -> None: try: get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) @@ -283,9 +298,16 @@ def try_to_get_object_and_expect_error( 
@reporter.step_deco("Verify object can be get using HTTP header attribute") def get_object_by_attr_and_verify_hashes( - oid: str, file_name: str, cid: str, attrs: dict, endpoint: str, http_hostname: str, + oid: str, + file_name: str, + cid: str, + attrs: dict, + endpoint: str, + http_hostname: str, ) -> None: - got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) + got_file_path_http = get_via_http_gate( + cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname + ) got_file_path_http_attr = get_via_http_gate_by_attribute( cid=cid, attribute=attrs, endpoint=endpoint, http_hostname=http_hostname ) @@ -326,7 +348,9 @@ def verify_object_hash( shell=shell, endpoint=random_node.get_rpc_endpoint(), ) - got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) + got_file_path_http = object_getter( + cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname + ) assert_hashes_are_equal(file_name, got_file_path, got_file_path_http) @@ -369,10 +393,20 @@ def try_to_get_object_via_passed_request_and_expect_error( ) -> None: try: if attrs is None: - get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, request_path=http_request_path, http_hostname=http_hostname) + get_via_http_gate( + cid=cid, + oid=oid, + endpoint=endpoint, + request_path=http_request_path, + http_hostname=http_hostname, + ) else: get_via_http_gate_by_attribute( - cid=cid, attribute=attrs, endpoint=endpoint, request_path=http_request_path, http_hostname=http_hostname + cid=cid, + attribute=attrs, + endpoint=endpoint, + request_path=http_request_path, + http_hostname=http_hostname, ) raise AssertionError(f"Expected error on getting object with cid: {cid}") except Exception as err: diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index c18b8d8..deb8c7f 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -241,10 +241,11 @@ class ClusterStateController: @reporter.step_deco("Resume {process_name} service in {node}") def resume_service(self, process_name: str, node: ClusterNode): node.host.wait_success_resume_process(process_name) - if self.suspended_services.get(process_name): - self.suspended_services[process_name].append(node) - else: - self.suspended_services[process_name] = [node] + if ( + self.suspended_services.get(process_name) + and node in self.suspended_services[process_name] + ): + self.suspended_services[process_name].remove(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start suspend processes services") diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 0fa6cde..e1dfcd1 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -28,55 +28,6 @@ COLOR_GREEN = "\033[92m" COLOR_OFF = "\033[0m" -def _cmd_run(cmd: str, timeout: int = 90) -> str: - """ - Runs given shell command , in case of success returns its stdout, - in case of failure returns error message. 
- """ - compl_proc = None - start_time = datetime.now() - try: - logger.info(f"{COLOR_GREEN}Executing command: {cmd}{COLOR_OFF}") - start_time = datetime.utcnow() - compl_proc = subprocess.run( - cmd, - check=True, - universal_newlines=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - timeout=timeout, - shell=True, - ) - output = compl_proc.stdout - return_code = compl_proc.returncode - end_time = datetime.utcnow() - logger.info(f"{COLOR_GREEN}Output: {output}{COLOR_OFF}") - _attach_allure_log(cmd, output, return_code, start_time, end_time) - - return output - except subprocess.CalledProcessError as exc: - logger.info( - f"Command: {cmd}\n" f"Error:\nreturn code: {exc.returncode} " f"\nOutput: {exc.output}" - ) - end_time = datetime.now() - return_code, cmd_output = subprocess.getstatusoutput(cmd) - _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time) - - raise RuntimeError( - f"Command: {cmd}\n" f"Error:\nreturn code: {exc.returncode}\n" f"Output: {exc.output}" - ) from exc - except OSError as exc: - raise RuntimeError(f"Command: {cmd}\n" f"Output: {exc.strerror}") from exc - except Exception as exc: - return_code, cmd_output = subprocess.getstatusoutput(cmd) - end_time = datetime.now() - _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time) - logger.info( - f"Command: {cmd}\n" f"Error:\nreturn code: {return_code}\n" f"Output: {cmd_output}" - ) - raise - - def _run_with_passwd(cmd: str) -> str: child = pexpect.spawn(cmd) child.delaybeforesend = 1 From 8ee2985c899dd8dd1a72d157e6ac7604a94390b3 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 30 Oct 2023 14:37:23 +0300 Subject: [PATCH 071/274] [#108] Update user with couple retries Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/runners.py | 89 +++++++++++++++++------------ 1 file changed, 54 insertions(+), 35 deletions(-) diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index b65f129..635247e 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -3,7 +3,6 @@ import itertools import math import re import time -from concurrent.futures import ThreadPoolExecutor from dataclasses import fields from typing import Optional from urllib.parse import urlparse @@ -24,12 +23,14 @@ from frostfs_testlib.resources.load_params import ( LOAD_NODE_SSH_USER, LOAD_NODES, ) +from frostfs_testlib.shell.command_inspectors import SuInspector from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import parallel, run_optionally +from frostfs_testlib.testing.test_control import retry from frostfs_testlib.utils import FileKeeper, datetime_utils reporter = get_reporter() @@ -296,40 +297,53 @@ class LocalRunner(RunnerBase): nodes_under_load: list[ClusterNode], k6_dir: str, ): - @reporter.step_deco("Prepare node {cluster_node}") - def prepare_node(cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() + parallel(self.prepare_node, nodes_under_load, k6_dir, load_params) - with reporter.step("Allow storage user to login into system"): - shell.exec(f"sudo chsh -s /bin/bash {STORAGE_USER_NAME}") - shell.exec("sudo chattr +i /etc/passwd") + @retry(3, 5, 
expected_result=True) + def allow_user_to_login_in_system(self, cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() - with reporter.step("Update limits.conf"): - limits_path = "/etc/security/limits.conf" - self.file_keeper.add(cluster_node.storage_node, limits_path) - content = f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n" - shell.exec(f"echo '{content}' | sudo tee {limits_path}") + result = None + try: + shell.exec(f"sudo chsh -s /bin/bash {STORAGE_USER_NAME}") + self.lock_passwd_on_node(cluster_node) + options = CommandOptions(check=False, extra_inspectors=[SuInspector(STORAGE_USER_NAME)]) + result = shell.exec("whoami", options) + finally: + if not result or result.return_code: + self.restore_passwd_on_node(cluster_node) + return False - with reporter.step("Download K6"): - shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}") - shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}") - shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz -C {k6_dir}") - shell.exec(f"sudo chmod -R 777 {k6_dir}") + return True - with reporter.step("Create empty_passwd"): - self.wallet = WalletInfo( - f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml" - ) - content = yaml.dump({"password": ""}) - shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}') - shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}") + @reporter.step_deco("Prepare node {cluster_node}") + def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams): + shell = cluster_node.host.get_shell() - with ThreadPoolExecutor(max_workers=len(nodes_under_load)) as executor: - result = executor.map(prepare_node, nodes_under_load) + with reporter.step("Allow storage user to login into system"): + self.allow_user_to_login_in_system(cluster_node) - # Check for exceptions - for _ in result: - pass + with reporter.step("Update limits.conf"): + limits_path = "/etc/security/limits.conf" + self.file_keeper.add(cluster_node.storage_node, limits_path) + content = ( + f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n" + ) + shell.exec(f"echo '{content}' | sudo tee {limits_path}") + + with reporter.step("Download K6"): + shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}") + shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}") + shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz -C {k6_dir}") + shell.exec(f"sudo chmod -R 777 {k6_dir}") + + with reporter.step("Create empty_passwd"): + self.wallet = WalletInfo( + f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml" + ) + content = yaml.dump({"password": ""}) + shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}') + shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}") @reporter.step_deco("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): @@ -379,16 +393,21 @@ class LocalRunner(RunnerBase): ): time.sleep(wait_after_start_time) + @reporter.step_deco("Restore passwd on {cluster_node}") + def restore_passwd_on_node(self, cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + shell.exec("sudo chattr -i /etc/passwd") + + @reporter.step_deco("Lock passwd on {cluster_node}") + def lock_passwd_on_node(self, cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + shell.exec("sudo chattr +i /etc/passwd") + def stop(self): for k6_instance in self.k6_instances: k6_instance.stop() - @reporter.step_deco("Restore passwd on {cluster_node}") - def 
restore_passwd_attr_on_node(cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() - shell.exec("sudo chattr -i /etc/passwd") - - parallel(restore_passwd_attr_on_node, self.nodes_under_load) + parallel(self.restore_passwd_on_node, self.nodes_under_load) self.cluster_state_controller.start_stopped_storage_services() self.cluster_state_controller.start_stopped_s3_gates() From e970fe2788949673913d16a73fbbb738829a9515 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 31 Oct 2023 14:17:54 +0300 Subject: [PATCH 072/274] [#109] Update CSC with healthchecks --- .../healthcheck/basic_healthcheck.py | 35 +++- src/frostfs_testlib/healthcheck/interfaces.py | 8 + src/frostfs_testlib/load/runners.py | 3 +- .../controllers/cluster_state_controller.py | 180 +++++++++++------- .../storage/dataclasses/node_base.py | 7 + src/frostfs_testlib/utils/__init__.py | 3 - 6 files changed, 158 insertions(+), 78 deletions(-) diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py index 3f4bc79..9c1d151 100644 --- a/src/frostfs_testlib/healthcheck/basic_healthcheck.py +++ b/src/frostfs_testlib/healthcheck/basic_healthcheck.py @@ -1,5 +1,7 @@ +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.healthcheck.interfaces import Healthcheck from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC from frostfs_testlib.steps.node_management import storage_node_healthcheck from frostfs_testlib.storage.cluster import ClusterNode @@ -9,6 +11,33 @@ reporter = get_reporter() class BasicHealthcheck(Healthcheck): @reporter.step_deco("Perform healthcheck for {cluster_node}") def perform(self, cluster_node: ClusterNode): - health_check = storage_node_healthcheck(cluster_node.storage_node) - if health_check.health_status != "READY" or health_check.network_status != "ONLINE": - raise AssertionError("Node {cluster_node} is not healthy") + result = self.storage_healthcheck(cluster_node) + if result: + raise AssertionError(result) + + @reporter.step_deco("Tree healthcheck on {cluster_node}") + def tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: + host = cluster_node.host + service_config = host.get_service_config(cluster_node.storage_node.name) + wallet_path = service_config.attributes["wallet_path"] + wallet_password = service_config.attributes["wallet_password"] + + shell = host.get_shell() + wallet_config_path = f"/tmp/{cluster_node.storage_node.name}-config.yaml" + wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' + shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") + + remote_cli = FrostfsCli( + shell, + host.get_cli_config(FROSTFS_CLI_EXEC).exec_path, + config_file=wallet_config_path, + ) + result = remote_cli.tree.healthcheck(rpc_endpoint="127.0.0.1:8080") + if result.return_code != 0: + return f"Error during tree healthcheck (rc={result.return_code}): {result.stdout}. \n Stderr: {result.stderr}" + + @reporter.step_deco("Storage healthcheck on {cluster_node}") + def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: + result = storage_node_healthcheck(cluster_node.storage_node) + if result.health_status != "READY" or result.network_status != "ONLINE": + return f"Node {cluster_node} is not healthy. Health={result.health_status}. 
Network={result.network_status}" diff --git a/src/frostfs_testlib/healthcheck/interfaces.py b/src/frostfs_testlib/healthcheck/interfaces.py index 0c77957..a036a82 100644 --- a/src/frostfs_testlib/healthcheck/interfaces.py +++ b/src/frostfs_testlib/healthcheck/interfaces.py @@ -7,3 +7,11 @@ class Healthcheck(ABC): @abstractmethod def perform(self, cluster_node: ClusterNode): """Perform healthcheck on the target cluster node""" + + @abstractmethod + def tree_healthcheck(self, cluster_node: ClusterNode): + """Check tree sync status on target cluster node""" + + @abstractmethod + def storage_healthcheck(self, cluster_node: ClusterNode): + """Perform storage node healthcheck on target cluster node""" diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index 635247e..4c07100 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -31,7 +31,8 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, Storage from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import parallel, run_optionally from frostfs_testlib.testing.test_control import retry -from frostfs_testlib.utils import FileKeeper, datetime_utils +from frostfs_testlib.utils import datetime_utils +from frostfs_testlib.utils.file_keeper import FileKeeper reporter = get_reporter() diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index deb8c7f..2cf1451 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,15 +1,15 @@ -import copy import time import frostfs_testlib.resources.optionals as optionals +from frostfs_testlib.healthcheck.interfaces import Healthcheck from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider from frostfs_testlib.steps.network import IfUpDownHelper, IpTablesHelper -from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode +from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.testing import parallel -from frostfs_testlib.testing.test_control import run_optionally +from frostfs_testlib.testing.test_control import run_optionally, wait_for_success from frostfs_testlib.utils.failover_utils import ( wait_all_storage_nodes_returned, wait_for_host_offline, @@ -22,18 +22,36 @@ if_up_down_helper = IfUpDownHelper() class ClusterStateController: - def __init__(self, shell: Shell, cluster: Cluster) -> None: + def __init__(self, shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> None: self.stopped_nodes: list[ClusterNode] = [] self.detached_disks: dict[str, DiskController] = {} - self.stopped_storage_nodes: list[ClusterNode] = [] - self.stopped_s3_gates: list[ClusterNode] = [] self.dropped_traffic: list[ClusterNode] = [] self.stopped_services: set[NodeBase] = set() self.cluster = cluster + self.healthcheck = healthcheck self.shell = shell self.suspended_services: dict[str, list[ClusterNode]] = {} self.nodes_with_modified_interface: list[ClusterNode] = [] + def _get_stopped_by_node(self, node: ClusterNode) -> set[NodeBase]: + stopped_by_node = [svc for svc in self.stopped_services if 
svc.host == node.host] + return set(stopped_by_node) + + def _get_stopped_by_type(self, service_type: type[ServiceClass]) -> set[ServiceClass]: + stopped_by_type = [svc for svc in self.stopped_services if isinstance(svc, service_type)] + return set(stopped_by_type) + + def _from_stopped_nodes(self, service_type: type[ServiceClass]) -> set[ServiceClass]: + stopped_on_nodes = set([node.service(service_type) for node in self.stopped_nodes]) + return set(stopped_on_nodes) + + def _get_online(self, service_type: type[ServiceClass]) -> set[ServiceClass]: + stopped_svc = self._get_stopped_by_type(service_type).union( + self._from_stopped_nodes(service_type) + ) + online_svc = set(self.cluster.services(service_type)) - stopped_svc + return online_svc + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop host of node {node}") def stop_node_host(self, node: ClusterNode, mode: str): @@ -65,26 +83,6 @@ class ClusterStateController: for node in nodes: wait_for_host_offline(self.shell, node.storage_node) - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop all storage services on cluster") - def stop_all_storage_services(self, reversed_order: bool = False): - nodes = ( - reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes - ) - - for node in nodes: - self.stop_storage_service(node) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop all S3 gates on cluster") - def stop_all_s3_gates(self, reversed_order: bool = False): - nodes = ( - reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes - ) - - for node in nodes: - self.stop_s3_gate(node) - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start host of node {node}") def start_node_host(self, node: ClusterNode): @@ -104,13 +102,10 @@ class ClusterStateController: for node in nodes: with reporter.step(f"Start host {node.host.config.address}"): node.host.start_host() - if node in self.stopped_storage_nodes: - self.stopped_storage_nodes.remove(node) + self.stopped_services.difference_update(self._get_stopped_by_node(node)) - if node in self.stopped_s3_gates: - self.stopped_s3_gates.remove(node) self.stopped_nodes = [] - wait_all_storage_nodes_returned(self.shell, self.cluster) + self.wait_after_storage_startup() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Detach disk {device} at {mountpoint} on node {node}") @@ -133,42 +128,57 @@ class ClusterStateController: disk_controller.attach() self.detached_disks = {} - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop storage service on {node}") - def stop_storage_service(self, node: ClusterNode, mask: bool = True): - self.stopped_storage_nodes.append(node) - node.storage_node.stop_service(mask) - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop all {service_type} services") - def stop_services_of_type(self, service_type: type[ServiceClass]): + def stop_services_of_type(self, service_type: type[ServiceClass], mask: bool = True): services = self.cluster.services(service_type) self.stopped_services.update(services) - parallel([service.stop_service for service in services]) + parallel([service.stop_service for service in services], mask=mask) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start all {service_type} services") def start_services_of_type(self, service_type: type[ServiceClass]): services = 
self.cluster.services(service_type) parallel([service.start_service for service in services]) + self.stopped_services.difference_update(set(services)) if service_type == StorageNode: - wait_all_storage_nodes_returned(self.shell, self.cluster) + self.wait_after_storage_startup() - self.stopped_services = self.stopped_services - set(services) + @wait_for_success(600, 60) + def wait_s3gate(self, s3gate: S3Gate): + with reporter.step(f"Wait for {s3gate} reconnection"): + result = s3gate.get_metric("frostfs_s3_gw_pool_current_nodes") + assert ( + 'address="127.0.0.1' in result.stdout + ), "S3Gate should connect to local storage node" + + @reporter.step_deco("Wait for S3Gates reconnection to local storage") + def wait_s3gates(self): + online_s3gates = self._get_online(S3Gate) + parallel(self.wait_s3gate, online_s3gates) + + @wait_for_success(600, 60) + def wait_tree_healthcheck(self): + nodes = self.cluster.nodes(self._get_online(StorageNode)) + parallel(self.healthcheck.tree_healthcheck, nodes) + + @reporter.step_deco("Wait for storage reconnection to the system") + def wait_after_storage_startup(self): + wait_all_storage_nodes_returned(self.shell, self.cluster) + self.wait_s3gates() + self.wait_tree_healthcheck() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start all stopped services") def start_all_stopped_services(self): + stopped_storages = self._get_stopped_by_type(StorageNode) parallel([service.start_service for service in self.stopped_services]) - - for service in self.stopped_services: - if isinstance(service, StorageNode): - wait_all_storage_nodes_returned(self.shell, self.cluster) - break - self.stopped_services.clear() + if stopped_storages: + self.wait_after_storage_startup() + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop {service_type} service on {node}") def stop_service_of_type( @@ -183,50 +193,78 @@ class ClusterStateController: def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]): service = node.service(service_type) service.start_service() - if service in self.stopped_services: - self.stopped_services.remove(service) + self.stopped_services.discard(service) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start all stopped {service_type} services") + def start_stopped_services_of_type(self, service_type: type[ServiceClass]): + stopped_svc = self._get_stopped_by_type(service_type) + if not stopped_svc: + return + + parallel([svc.start_service for svc in stopped_svc]) + self.stopped_services.difference_update(stopped_svc) + + if service_type == StorageNode: + self.wait_after_storage_startup() + + # TODO: Deprecated + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop all storage services on cluster") + def stop_all_storage_services(self, reversed_order: bool = False): + nodes = ( + reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + ) + + for node in nodes: + self.stop_service_of_type(node, StorageNode) + + # TODO: Deprecated + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop all S3 gates on cluster") + def stop_all_s3_gates(self, reversed_order: bool = False): + nodes = ( + reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + ) + + for node in nodes: + self.stop_service_of_type(node, S3Gate) + + # TODO: Deprecated + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop storage service on {node}") 
+ def stop_storage_service(self, node: ClusterNode, mask: bool = True): + self.stop_service_of_type(node, StorageNode, mask) + + # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start storage service on {node}") def start_storage_service(self, node: ClusterNode): - node.storage_node.start_service() - self.stopped_storage_nodes.remove(node) + self.start_service_of_type(node, StorageNode) + # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start stopped storage services") def start_stopped_storage_services(self): - if not self.stopped_storage_nodes: - return - - # In case if we stopped couple services, for example (s01-s04): - # After starting only s01, it may require connections to s02-s04, which is still down, and fail to start. - # Also, if something goes wrong here, we might skip s02-s04 start at all, and cluster will be left in a bad state. - # So in order to make sure that services are at least attempted to be started, using parallel runs here. - parallel(self.start_storage_service, copy.copy(self.stopped_storage_nodes)) - - wait_all_storage_nodes_returned(self.shell, self.cluster) - self.stopped_storage_nodes = [] + self.start_stopped_services_of_type(StorageNode) + # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop s3 gate on {node}") def stop_s3_gate(self, node: ClusterNode, mask: bool = True): - node.s3_gate.stop_service(mask) - self.stopped_s3_gates.append(node) + self.stop_service_of_type(node, S3Gate, mask) + # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start s3 gate on {node}") def start_s3_gate(self, node: ClusterNode): - node.s3_gate.start_service() - self.stopped_s3_gates.remove(node) + self.start_service_of_type(node, S3Gate) + # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start stopped S3 gates") def start_stopped_s3_gates(self): - if not self.stopped_s3_gates: - return - - parallel(self.start_s3_gate, copy.copy(self.stopped_s3_gates)) - self.stopped_s3_gates = [] + self.start_stopped_services_of_type(S3Gate) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Suspend {process_name} service in {node}") diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 8708520..1e23c7e 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -7,6 +7,7 @@ import yaml from frostfs_testlib.hosting.config import ServiceConfig from frostfs_testlib.hosting.interfaces import Host from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.shell.interfaces import CommandResult from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.testing.readable import HumanReadableABC from frostfs_testlib.utils import wallet_utils @@ -67,6 +68,12 @@ class NodeBase(HumanReadableABC): def service_healthcheck(self) -> bool: """Service healthcheck.""" + # TODO: Migrate to sub-class Metrics (does not exist yet :)) + def get_metric(self, metric: str) -> CommandResult: + shell = self.host.get_shell() + result = shell.exec(f"curl -s {self.get_metrics_endpoint()} | grep -e '^{metric}'") + return result + def get_metrics_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS) diff --git a/src/frostfs_testlib/utils/__init__.py 
b/src/frostfs_testlib/utils/__init__.py index 0ac903a..fbc4a8f 100644 --- a/src/frostfs_testlib/utils/__init__.py +++ b/src/frostfs_testlib/utils/__init__.py @@ -3,6 +3,3 @@ import frostfs_testlib.utils.datetime_utils import frostfs_testlib.utils.json_utils import frostfs_testlib.utils.string_utils import frostfs_testlib.utils.wallet_utils - -# TODO: Circullar dependency FileKeeper -> NodeBase -> Utils -> FileKeeper -> NodeBase -from frostfs_testlib.utils.file_keeper import FileKeeper From 03c45d7592979d3f09b0c05b5bd7921139e382de Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 31 Oct 2023 18:17:21 +0300 Subject: [PATCH 073/274] [#110] Move chattr call after get_results call Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/runners.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index 4c07100..9859256 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -408,8 +408,6 @@ class LocalRunner(RunnerBase): for k6_instance in self.k6_instances: k6_instance.stop() - parallel(self.restore_passwd_on_node, self.nodes_under_load) - self.cluster_state_controller.start_stopped_storage_services() self.cluster_state_controller.start_stopped_s3_gates() @@ -419,4 +417,6 @@ class LocalRunner(RunnerBase): result = k6_instance.get_results() results[k6_instance.loader.ip] = result + parallel(self.restore_passwd_on_node, self.nodes_under_load) + return results From 1f50166e78845aee6e94bb1a448eb78add2f4e98 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 2 Nov 2023 11:13:34 +0300 Subject: [PATCH 074/274] Add method for work time Signed-off-by: Dmitriy Zayakin --- pyproject.toml | 4 +- .../controllers/cluster_state_controller.py | 73 ++++++++++++------- 2 files changed, 50 insertions(+), 27 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 3178bbe..ba38c03 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -50,10 +50,10 @@ basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck" [tool.isort] profile = "black" src_paths = ["src", "tests"] -line_length = 100 +line_length = 120 [tool.black] -line-length = 100 +line-length = 120 target-version = ["py310"] [tool.bumpver] diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 2cf1451..473af10 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,3 +1,4 @@ +import datetime import time import frostfs_testlib.resources.optionals as optionals @@ -46,9 +47,7 @@ class ClusterStateController: return set(stopped_on_nodes) def _get_online(self, service_type: type[ServiceClass]) -> set[ServiceClass]: - stopped_svc = self._get_stopped_by_type(service_type).union( - self._from_stopped_nodes(service_type) - ) + stopped_svc = self._get_stopped_by_type(service_type).union(self._from_stopped_nodes(service_type)) online_svc = set(self.cluster.services(service_type)) - stopped_svc return online_svc @@ -67,9 +66,7 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Shutdown whole cluster") def shutdown_cluster(self, mode: str, reversed_order: bool = False): - nodes = ( - reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes - ) + nodes = reversed(self.cluster.cluster_nodes) if reversed_order else 
self.cluster.cluster_nodes # Drop all ssh connections before shutdown provider = SshConnectionProvider() @@ -149,9 +146,7 @@ class ClusterStateController: def wait_s3gate(self, s3gate: S3Gate): with reporter.step(f"Wait for {s3gate} reconnection"): result = s3gate.get_metric("frostfs_s3_gw_pool_current_nodes") - assert ( - 'address="127.0.0.1' in result.stdout - ), "S3Gate should connect to local storage node" + assert 'address="127.0.0.1' in result.stdout, "S3Gate should connect to local storage node" @reporter.step_deco("Wait for S3Gates reconnection to local storage") def wait_s3gates(self): @@ -181,9 +176,7 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop {service_type} service on {node}") - def stop_service_of_type( - self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True - ): + def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True): service = node.service(service_type) service.stop_service(mask) self.stopped_services.add(service) @@ -212,9 +205,7 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop all storage services on cluster") def stop_all_storage_services(self, reversed_order: bool = False): - nodes = ( - reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes - ) + nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes for node in nodes: self.stop_service_of_type(node, StorageNode) @@ -223,9 +214,7 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop all S3 gates on cluster") def stop_all_s3_gates(self, reversed_order: bool = False): - nodes = ( - reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes - ) + nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes for node in nodes: self.stop_service_of_type(node, S3Gate) @@ -279,10 +268,7 @@ class ClusterStateController: @reporter.step_deco("Resume {process_name} service in {node}") def resume_service(self, process_name: str, node: ClusterNode): node.host.wait_success_resume_process(process_name) - if ( - self.suspended_services.get(process_name) - and node in self.suspended_services[process_name] - ): + if self.suspended_services.get(process_name) and node in self.suspended_services[process_name]: self.suspended_services[process_name].remove(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @@ -388,9 +374,46 @@ class ClusterStateController: for node in self.nodes_with_modified_interface: if_up_down_helper.up_all_interface(node) - def _get_disk_controller( - self, node: StorageNode, device: str, mountpoint: str - ) -> DiskController: + @reporter.step_deco("Get node time") + def get_node_date(self, node: ClusterNode) -> datetime: + shell = node.host.get_shell() + return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z") + + @reporter.step_deco("Set node time to {in_date}") + def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: + shell = node.host.get_shell() + shell.exec(f"hwclock --set --date='{in_date}'") + shell.exec("hwclock --hctosys") + node_time = self.get_node_date(node) + with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): + assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1) + + 
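The time helpers added in this change (get_node_date and change_node_date above, restore_node_date and set_sync_date_all_nodes just below) are meant to be used as a pair with the NTP toggle, so that systemd-timesyncd does not silently correct the shifted clock mid-test. A minimal usage sketch (hypothetical test code, not part of this patch; `csc` and `node` stand for assumed ClusterStateController and ClusterNode fixtures):

    import datetime

    def test_with_shifted_clock(csc, node):
        # Disable NTP sync first, otherwise timesyncd may undo the shift
        csc.set_sync_date_all_nodes("inactive")
        try:
            shifted = csc.get_node_date(node) + datetime.timedelta(days=1)
            csc.change_node_date(node, shifted)
            # ... assertions that depend on the shifted hardware clock ...
        finally:
            csc.restore_node_date(node)
            csc.set_sync_date_all_nodes("active")

Note that set_sync_date_all_nodes treats any status other than "active" as a request to disable synchronization, which is why "inactive" works in this sketch.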
@reporter.step_deco(f"Restore time") + def restore_node_date(self, node: ClusterNode) -> None: + shell = node.host.get_shell() + now_time = datetime.datetime.now(datetime.timezone.utc) + with reporter.step(f"Set {now_time} time"): + shell.exec(f"hwclock --set --date='{now_time}'") + shell.exec("hwclock --hctosys") + + @reporter.step_deco("Change the synchronizer status to {status}") + def set_sync_date_all_nodes(self, status: str): + if status == "active": + parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes) + return + parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes) + + def _enable_date_synchronizer(self, cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + shell.exec("timedatectl set-ntp true") + cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 5) + + def _disable_date_synchronizer(self, cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + shell.exec("timedatectl set-ntp false") + cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 5) + + def _get_disk_controller(self, node: StorageNode, device: str, mountpoint: str) -> DiskController: disk_controller_id = DiskController.get_id(node, device) if disk_controller_id in self.detached_disks.keys(): disk_controller = self.detached_disks[disk_controller_id] From c8227e80afb613caa9d98cd3c1d79e7c0df71c62 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Thu, 2 Nov 2023 19:18:31 +0300 Subject: [PATCH 075/274] update-remaining time --- src/frostfs_testlib/load/k6.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index e7a2b39..e46221e 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -180,7 +180,9 @@ class K6: while timeout > 0: if not self._k6_process.running(): return - logger.info(f"K6 is running. Waiting {wait_interval} seconds...") + remaining_time_hours = f"{timeout//3600}h" if timeout//3600 != 0 else "" + remaining_time_minutes = f"{timeout//60%60}m" if timeout//60%60 != 0 else "" + logger.info(f"K6 is running. Remaining time {remaining_time_hours}{remaining_time_minutes}{timeout%60}s. 
Next check after {wait_interval} seconds...") sleep(wait_interval) timeout -= min(timeout, wait_interval) wait_interval = max( From f8562da7e03e1c06716b171896bb605790f02bb0 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Wed, 8 Nov 2023 19:49:20 +0300 Subject: [PATCH 076/274] Add AWS retries Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/resources/common.py | 2 +- src/frostfs_testlib/s3/aws_cli_client.py | 71 ++++++------------------ 2 files changed, 17 insertions(+), 56 deletions(-) diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index 131bf8a..7f8d2c4 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -43,6 +43,6 @@ with open(DEFAULT_WALLET_CONFIG, "w") as file: # Number of attempts that S3 clients will attempt per each request (1 means single attempt # without any retries) -MAX_REQUEST_ATTEMPTS = 1 +MAX_REQUEST_ATTEMPTS = 5 RETRY_MODE = "standard" CREDENTIALS_CREATE_TIMEOUT = "1m" diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index dbece66..320d74b 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -7,12 +7,7 @@ from time import sleep from typing import Literal, Optional, Union from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.resources.common import ( - ASSETS_DIR, - MAX_REQUEST_ATTEMPTS, - RETRY_MODE, - S3_SYNC_WAIT_TIME, -) +from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.shell import CommandOptions from frostfs_testlib.shell.local_shell import LocalShell @@ -128,9 +123,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Put bucket tagging") def put_bucket_tagging(self, bucket: str, tags: list) -> None: - tags_json = { - "TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - } + tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]} cmd = ( f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} " f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint}" @@ -140,8 +133,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Get bucket tagging") def get_bucket_tagging(self, bucket: str) -> list: cmd = ( - f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" + f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -149,10 +141,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Get bucket acl") def get_bucket_acl(self, bucket: str) -> list: - cmd = ( - f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" - ) + cmd = f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Grants") @@ -160,8 +149,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Get bucket location") def get_bucket_location(self, bucket: str) -> dict: cmd = ( - f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" + f"aws {self.common_flags} 
s3api get-bucket-location --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -169,10 +157,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("List objects S3") def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = ( - f"aws {self.common_flags} s3api list-objects --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" - ) + cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -183,10 +168,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("List objects S3 v2") def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = ( - f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" - ) + cmd = f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -371,10 +353,7 @@ class AwsCliClient(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> None: - cmd = ( - f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " - f" --endpoint {self.s3gate_endpoint}" - ) + cmd = f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " f" --endpoint {self.s3gate_endpoint}" if acl: cmd += f" --acl {acl}" if grant_write: @@ -442,9 +421,7 @@ class AwsCliClient(S3ClientWrapper): def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: # Delete objects without creating delete markers for object_version in object_versions: - self.delete_object( - bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"] - ) + self.delete_object(bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"]) @reporter.step_deco("Get object attributes") def get_object_attributes( @@ -480,10 +457,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Get bucket policy") def get_bucket_policy(self, bucket: str) -> dict: - cmd = ( - f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" - ) + cmd = f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Policy") @@ -505,10 +479,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Get bucket cors") def get_bucket_cors(self, bucket: str) -> dict: - cmd = ( - f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" - ) + cmd = f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("CORSRules") @@ -524,8 +495,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Delete bucket cors") def delete_bucket_cors(self, bucket: str) -> None: cmd = ( - f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" + f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) self.local_shell.exec(cmd) @@ -608,10 +578,7 @@ class AwsCliClient(S3ClientWrapper): acl: 
Optional[str] = None, metadata: Optional[dict] = None, ) -> dict: - cmd = ( - f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " - f"--endpoint-url {self.s3gate_endpoint}" - ) + cmd = f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint}" if metadata: cmd += " --metadata" for key, value in metadata.items(): @@ -674,9 +641,7 @@ class AwsCliClient(S3ClientWrapper): self.local_shell.exec(cmd) @reporter.step_deco("Upload part S3") - def upload_part( - self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str - ) -> str: + def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: cmd = ( f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} " f"--upload-id {upload_id} --part-number {part_num} --body {filepath} " @@ -688,9 +653,7 @@ class AwsCliClient(S3ClientWrapper): return response["ETag"] @reporter.step_deco("Upload copy part S3") - def upload_part_copy( - self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str - ) -> str: + def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: cmd = ( f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} " f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} " @@ -698,9 +661,7 @@ class AwsCliClient(S3ClientWrapper): ) output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) - assert response.get("CopyPartResult", []).get( - "ETag" - ), f"Expected ETag in response:\n{response}" + assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" return response["CopyPartResult"]["ETag"] From 72bd467c53b3ed451adef8f1d10db09928559f2e Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 10 Nov 2023 22:43:13 +0300 Subject: [PATCH 077/274] [#114] Add yaml configuration controllers Signed-off-by: Andrey Berezin --- pyproject.toml | 4 ++ src/frostfs_testlib/plugins/__init__.py | 13 ++++ src/frostfs_testlib/storage/cluster.py | 29 +++----- .../storage/configuration/interfaces.py | 65 ++++++++++++++++++ .../configuration/service_configuration.py | 67 +++++++++++++++++++ src/frostfs_testlib/storage/constants.py | 1 + .../controllers/cluster_state_controller.py | 22 ++++++ .../state_managers/config_state_manager.py | 51 ++++++++++++++ .../storage/dataclasses/node_base.py | 14 +++- 9 files changed, 244 insertions(+), 22 deletions(-) create mode 100644 src/frostfs_testlib/storage/configuration/interfaces.py create mode 100644 src/frostfs_testlib/storage/configuration/service_configuration.py create mode 100644 src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py diff --git a/pyproject.toml b/pyproject.toml index ba38c03..48cc418 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,6 +47,10 @@ docker = "frostfs_testlib.hosting.docker_host:DockerHost" [project.entry-points."frostfs.testlib.healthcheck"] basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck" +[project.entry-points."frostfs.testlib.csc_managers"] +config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager" + + [tool.isort] profile = "black" src_paths = ["src", "tests"] diff --git a/src/frostfs_testlib/plugins/__init__.py b/src/frostfs_testlib/plugins/__init__.py index 6914b9b..79de340 100644 --- a/src/frostfs_testlib/plugins/__init__.py +++ b/src/frostfs_testlib/plugins/__init__.py @@ -17,3 +17,16 
@@ def load_plugin(plugin_group: str, name: str) -> Any: return None plugin = plugins[name] return plugin.load() + + +def load_all(group: str) -> Any: + """Loads all plugins using entry point specification. + + Args: + group: Name of plugin group. + + Returns: + Classes from specified group. + """ + plugins = entry_points(group=group) + return [plugin.load() for plugin in plugins] diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index fa4ee0a..b8c32ca 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -8,14 +8,10 @@ from frostfs_testlib.hosting import Host, Hosting from frostfs_testlib.hosting.config import ServiceConfig from frostfs_testlib.reporter import get_reporter from frostfs_testlib.storage import get_service_registry +from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml +from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration from frostfs_testlib.storage.constants import ConfigAttributes -from frostfs_testlib.storage.dataclasses.frostfs_services import ( - HTTPGate, - InnerRing, - MorphChain, - S3Gate, - StorageNode, -) +from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.service_registry import ServiceRegistry @@ -93,6 +89,9 @@ class ClusterNode: config_str = yaml.dump(new_config) shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}") + def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml: + return ServiceConfiguration(self.service(service_type)) + def service(self, service_type: type[ServiceClass]) -> ServiceClass: """ Get a service cluster node of specified type. 
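For orientation, the `load_all` helper added above is the consumer side of the entry-point groups declared in pyproject.toml earlier in this patch. A rough sketch of the round trip (`csc` is an assumed ClusterStateController instance; the group name is the one registered in this change):

    from frostfs_testlib.plugins import load_all

    # Resolves every class registered under the group, e.g. ConfigStateManager
    manager_classes = load_all(group="frostfs.testlib.csc_managers")
    managers = [manager_class(csc) for manager_class in manager_classes]

This mirrors how ClusterStateController discovers its state managers later in this patch without importing them directly.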
@@ -118,9 +117,7 @@ class ClusterNode: ) def get_list_of_services(self) -> list[str]: - return [ - config.attributes[ConfigAttributes.SERVICE_NAME] for config in self.host.config.services - ] + return [config.attributes[ConfigAttributes.SERVICE_NAME] for config in self.host.config.services] def get_all_interfaces(self) -> dict[str, str]: return self.host.config.interfaces @@ -130,9 +127,7 @@ class ClusterNode: def get_data_interfaces(self) -> list[str]: return [ - ip_address - for name_interface, ip_address in self.host.config.interfaces.items() - if "data" in name_interface + ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface ] def get_data_interface(self, search_interface: str) -> list[str]: @@ -221,9 +216,7 @@ class Cluster: cluster_nodes = set() for service in services: - cluster_nodes.update( - [node for node in self.cluster_nodes if node.service(type(service)) == service] - ) + cluster_nodes.update([node for node in self.cluster_nodes if node.service(type(service)) == service]) return list(cluster_nodes) @@ -331,8 +324,6 @@ class Cluster: return [node.get_endpoint() for node in nodes] def get_nodes_by_ip(self, ips: list[str]) -> list[ClusterNode]: - cluster_nodes = [ - node for node in self.cluster_nodes if URL(node.morph_chain.get_endpoint()).host in ips - ] + cluster_nodes = [node for node in self.cluster_nodes if URL(node.morph_chain.get_endpoint()).host in ips] with reporter.step(f"Return cluster nodes - {cluster_nodes}"): return cluster_nodes diff --git a/src/frostfs_testlib/storage/configuration/interfaces.py b/src/frostfs_testlib/storage/configuration/interfaces.py new file mode 100644 index 0000000..b2bc683 --- /dev/null +++ b/src/frostfs_testlib/storage/configuration/interfaces.py @@ -0,0 +1,65 @@ +from abc import ABC, abstractmethod +from typing import Any + + +class ServiceConfigurationYml(ABC): + """ + Class to manipulate yml configuration for service + """ + + def _find_option(self, key: str, data: dict): + tree = key.split(":") + current = data + for node in tree: + if isinstance(current, list) and len(current) - 1 >= int(node): + current = current[int(node)] + continue + + if node not in current: + return None + + current = current[node] + + return current + + def _set_option(self, key: str, value: Any, data: dict): + tree = key.split(":") + current = data + for node in tree[:-1]: + if isinstance(current, list) and len(current) - 1 >= int(node): + current = current[int(node)] + continue + + if node not in current: + current[node] = {} + + current = current[node] + + current[tree[-1]] = value + + @abstractmethod + def get(self, key: str) -> str: + """ + Get parameter value from current configuration + + Args: + key: key of the parameter in yaml format like 'storage:shard:default:resync_metabase' + + Returns: + value of the parameter + """ + + @abstractmethod + def set(self, values: dict[str, Any]): + """ + Sets parameters to configuration + + Args: + values: dict where key is the key of the parameter in yaml format like 'storage:shard:default:resync_metabase' and value is the value of the option to set + """ + + @abstractmethod + def revert(self): + """ + Revert changes + """ diff --git a/src/frostfs_testlib/storage/configuration/service_configuration.py b/src/frostfs_testlib/storage/configuration/service_configuration.py new file mode 100644 index 0000000..1aa7846 --- /dev/null +++ b/src/frostfs_testlib/storage/configuration/service_configuration.py @@ -0,0 +1,67 @@ +import os +import re +from typing import Any 
+ +import yaml + +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.shell.interfaces import CommandOptions +from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml +from frostfs_testlib.storage.dataclasses.node_base import ServiceClass + +reporter = get_reporter() + + +class ServiceConfiguration(ServiceConfigurationYml): + def __init__(self, service: "ServiceClass") -> None: + self.service = service + self.shell = self.service.host.get_shell() + self.confd_path = os.path.join(self.service.config_dir, "conf.d") + self.custom_file = os.path.join(self.confd_path, "99_changes.yml") + + def _path_exists(self, path: str) -> bool: + return not self.shell.exec(f"test -e {path}", options=CommandOptions(check=False)).return_code + + def _get_data_from_file(self, path: str) -> dict: + content = self.shell.exec(f"cat {path}").stdout + data = yaml.safe_load(content) + return data + + def get(self, key: str) -> str: + with reporter.step(f"Get {key} configuration value for {self.service}"): + config_files = [self.service.main_config_path] + + if self._path_exists(self.confd_path): + files = self.shell.exec(f"find {self.confd_path} -type f").stdout.strip().split() + # Sort files in reverse order, from the latest one to the first + config_files.extend(sorted(files, key=lambda x: -int(re.findall("^\d+", os.path.basename(x))[0]))) + + result = None + for file in config_files: + data = self._get_data_from_file(file) + result = self._find_option(key, data) + if result is not None: + break + + return result + + def set(self, values: dict[str, Any]): + with reporter.step(f"Change configuration for {self.service}"): + if not self._path_exists(self.confd_path): + self.shell.exec(f"mkdir {self.confd_path}") + + if self._path_exists(self.custom_file): + data = self._get_data_from_file(self.custom_file) + else: + data = {} + + for key, value in values.items(): + self._set_option(key, value, data) + + content = yaml.dump(data) + self.shell.exec(f"echo '{content}' | sudo tee {self.custom_file}") + self.shell.exec(f"chmod 777 {self.custom_file}") + + def revert(self): + with reporter.step(f"Revert changed options for {self.service}"): + self.shell.exec(f"rm -rf {self.custom_file}") diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 2284ce3..9ad24eb 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -3,6 +3,7 @@ class ConfigAttributes: WALLET_PASSWORD = "wallet_password" WALLET_PATH = "wallet_path" WALLET_CONFIG = "wallet_config" + CONFIG_DIR = "service_config_dir" CONFIG_PATH = "config_path" SHARD_CONFIG_PATH = "shard_config_path" LOCAL_WALLET_PATH = "local_wallet_path" diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 473af10..479f4dc 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,8 +1,10 @@ import datetime import time +from typing import TypeVar import frostfs_testlib.resources.optionals as optionals from frostfs_testlib.healthcheck.interfaces import Healthcheck +from frostfs_testlib.plugins import load_all from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider from frostfs_testlib.steps.network import IfUpDownHelper, IpTablesHelper @@ -22,6 +24,14 @@ reporter = get_reporter() 
if_up_down_helper = IfUpDownHelper() +class StateManager: + def __init__(self, cluster_state_controller: "ClusterStateController") -> None: + self.csc = cluster_state_controller + + +StateManagerClass = TypeVar("StateManagerClass", bound=StateManager) + + class ClusterStateController: def __init__(self, shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> None: self.stopped_nodes: list[ClusterNode] = [] @@ -33,6 +43,18 @@ class ClusterStateController: self.shell = shell self.suspended_services: dict[str, list[ClusterNode]] = {} self.nodes_with_modified_interface: list[ClusterNode] = [] + self.managers: list[StateManagerClass] = [] + + # TODO: move all functionality to managers + managers = set(load_all(group="frostfs.testlib.csc_managers")) + for manager in managers: + self.managers.append(manager(self)) + + def manager(self, manager_type: type[StateManagerClass]) -> StateManagerClass: + for manager in self.managers: + # Subclasses here for the future if we have overriding subclasses of base interface + if issubclass(type(manager), manager_type): + return manager def _get_stopped_by_node(self, node: ClusterNode) -> set[NodeBase]: stopped_by_node = [svc for svc in self.stopped_services if svc.host == node.host] diff --git a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py new file mode 100644 index 0000000..078d483 --- /dev/null +++ b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py @@ -0,0 +1,51 @@ +from typing import Any + +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController, StateManager +from frostfs_testlib.storage.dataclasses.node_base import ServiceClass +from frostfs_testlib.testing import parallel + +reporter = get_reporter() + + +class ConfigStateManager(StateManager): + def __init__(self, cluster_state_controller: ClusterStateController) -> None: + super().__init__(cluster_state_controller) + self.services_with_changed_config: set[tuple[ClusterNode, ServiceClass]] = set() + self.cluster = self.csc.cluster + + @reporter.step_deco("Change configuration for {service_type} on all nodes") + def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any]): + services = self.cluster.services(service_type) + nodes = self.cluster.nodes(services) + self.services_with_changed_config.update([(node, service_type) for node in nodes]) + + self.csc.stop_services_of_type(service_type) + parallel([node.config(service_type).set for node in nodes], values=values) + self.csc.start_services_of_type(service_type) + + @reporter.step_deco("Change configuration for {service_type} on {node}") + def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]): + self.services_with_changed_config.add((node, service_type)) + + self.csc.stop_service_of_type(node, service_type) + node.config(service_type).set(values) + self.csc.start_service_of_type(node, service_type) + + @reporter.step_deco("Revert all configuration changes") + def revert_all(self): + if not self.services_with_changed_config: + return + + parallel(self._revert_svc, self.services_with_changed_config) + self.services_with_changed_config.clear() + + self.csc.start_all_stopped_services() + + # TODO: parallel can't have multiple parallel_items :( + @reporter.step_deco("Revert all configuration 
{node_and_service}") + def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]): + node, service_type = node_and_service + self.csc.stop_service_of_type(node, service_type) + node.config(service_type).revert() diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 1e23c7e..4b9ffc2 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -120,6 +120,15 @@ class NodeBase(HumanReadableABC): ConfigAttributes.WALLET_CONFIG, ) + @property + def config_dir(self) -> str: + return self._get_attribute(ConfigAttributes.CONFIG_DIR) + + @property + def main_config_path(self) -> str: + return self._get_attribute(ConfigAttributes.CONFIG_PATH) + + # TODO: Deprecated def get_config(self, config_file_path: Optional[str] = None) -> tuple[str, dict]: if config_file_path is None: config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) @@ -132,6 +141,7 @@ class NodeBase(HumanReadableABC): config = yaml.safe_load(config_text) return config_file_path, config + # TODO: Deprecated def save_config(self, new_config: dict, config_file_path: Optional[str] = None) -> None: if config_file_path is None: config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) @@ -146,9 +156,7 @@ class NodeBase(HumanReadableABC): storage_wallet_pass = self.get_wallet_password() return wallet_utils.get_wallet_public_key(storage_wallet_path, storage_wallet_pass) - def _get_attribute( - self, attribute_name: str, default_attribute_name: Optional[str] = None - ) -> str: + def _get_attribute(self, attribute_name: str, default_attribute_name: Optional[str] = None) -> str: config = self.host.get_service_config(self.name) if attribute_name not in config.attributes: From 6519cfafc96e8880ce8fd69b989b83a2e5013da9 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 13 Nov 2023 13:34:49 +0300 Subject: [PATCH 078/274] [#116] Updates for local scenario teardown Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/runners.py | 68 ++++++------------- .../controllers/cluster_state_controller.py | 3 +- 2 files changed, 21 insertions(+), 50 deletions(-) diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index 9859256..583c8e6 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -18,11 +18,7 @@ from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources import optionals from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC from frostfs_testlib.resources.common import STORAGE_USER_NAME -from frostfs_testlib.resources.load_params import ( - BACKGROUND_LOAD_VUS_COUNT_DIVISOR, - LOAD_NODE_SSH_USER, - LOAD_NODES, -) +from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR, LOAD_NODE_SSH_USER, LOAD_NODES from frostfs_testlib.shell.command_inspectors import SuInspector from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput from frostfs_testlib.storage.cluster import ClusterNode @@ -83,14 +79,10 @@ class DefaultRunner(RunnerBase): with reporter.step("Init s3 client on loaders"): storage_node = nodes_under_load[0].service(StorageNode) - s3_public_keys = [ - node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes - ] + s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes] grpc_peer = storage_node.get_rpc_endpoint() - parallel( - self._prepare_loader, self.loaders, 
load_params, grpc_peer, s3_public_keys, k6_dir
-        )
+        parallel(self._prepare_loader, self.loaders, load_params, grpc_peer, s3_public_keys, k6_dir)
 
     def _prepare_loader(
         self,
@@ -112,9 +104,9 @@ class DefaultRunner(RunnerBase):
             wallet_password=self.loaders_wallet.password,
         ).stdout
         aws_access_key_id = str(
-            re.search(
-                r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output
-            ).group("aws_access_key_id")
+            re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
+                "aws_access_key_id"
+            )
         )
         aws_secret_access_key = str(
             re.search(
@@ -125,9 +117,7 @@ class DefaultRunner(RunnerBase):
 
         configure_input = [
             InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
-            InteractiveInput(
-                prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key
-            ),
+            InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key),
             InteractiveInput(prompt_pattern=r".*", input=""),
             InteractiveInput(prompt_pattern=r".*", input=""),
         ]
@@ -144,16 +134,12 @@ class DefaultRunner(RunnerBase):
         }
         endpoints_generators = {
             K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]),
-            K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle(
-                [[endpoint] for endpoint in endpoints]
-            ),
+            K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle([[endpoint] for endpoint in endpoints]),
         }
         k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy]
         endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy]
 
-        distributed_load_params_list = self._get_distributed_load_params_list(
-            load_params, k6_processes_count
-        )
+        distributed_load_params_list = self._get_distributed_load_params_list(load_params, k6_processes_count)
 
         futures = parallel(
             self._init_k6_instance,
@@ -164,9 +150,7 @@ class DefaultRunner(RunnerBase):
         )
         self.k6_instances = [future.result() for future in futures]
 
-    def _init_k6_instance(
-        self, load_params_for_loader: LoadParams, loader: Loader, endpoints: list[str], k6_dir: str
-    ):
+    def _init_k6_instance(self, load_params_for_loader: LoadParams, loader: Loader, endpoints: list[str], k6_dir: str):
         shell = loader.get_shell()
         with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"):
             with reporter.step(f"Make working directory"):
@@ -204,9 +188,7 @@ class DefaultRunner(RunnerBase):
                 and getattr(original_load_params, field.name) is not None
             ):
                 original_value = getattr(original_load_params, field.name)
-                distribution = self._get_distribution(
-                    math.ceil(original_value / divisor), workers_count
-                )
+                distribution = self._get_distribution(math.ceil(original_value / divisor), workers_count)
                 for i in range(workers_count):
                     setattr(distributed_load_params[i], field.name, distribution[i])
 
@@ -233,10 +215,7 @@ class DefaultRunner(RunnerBase):
 
         # Remainder of clients left to be distributed
         remainder = clients_count - clients_per_worker * workers_count
-        distribution = [
-            clients_per_worker + 1 if i < remainder else clients_per_worker
-            for i in range(workers_count)
-        ]
+        distribution = [clients_per_worker + 1 if i < remainder else clients_per_worker for i in range(workers_count)]
         return distribution
 
     def start(self):
@@ -245,9 +224,7 @@ class DefaultRunner(RunnerBase):
         parallel([k6.start for k6 in self.k6_instances])
 
         wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5
-        with reporter.step(
-            f"Wait for start timeout + couple more 
seconds ({wait_after_start_time}) before moving on"): time.sleep(wait_after_start_time) def stop(self): @@ -327,9 +304,7 @@ class LocalRunner(RunnerBase): with reporter.step("Update limits.conf"): limits_path = "/etc/security/limits.conf" self.file_keeper.add(cluster_node.storage_node, limits_path) - content = ( - f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n" - ) + content = f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n" shell.exec(f"echo '{content}' | sudo tee {limits_path}") with reporter.step("Download K6"): @@ -339,9 +314,7 @@ class LocalRunner(RunnerBase): shell.exec(f"sudo chmod -R 777 {k6_dir}") with reporter.step("Create empty_passwd"): - self.wallet = WalletInfo( - f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml" - ) + self.wallet = WalletInfo(f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml") content = yaml.dump({"password": ""}) shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}') shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}") @@ -383,15 +356,13 @@ class LocalRunner(RunnerBase): def start(self): load_params = self.k6_instances[0].load_params - self.cluster_state_controller.stop_all_s3_gates() - self.cluster_state_controller.stop_all_storage_services() + self.cluster_state_controller.stop_services_of_type(S3Gate) + self.cluster_state_controller.stop_services_of_type(StorageNode) parallel([k6.start for k6 in self.k6_instances]) wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5 - with reporter.step( - f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on" - ): + with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"): time.sleep(wait_after_start_time) @reporter.step_deco("Restore passwd on {cluster_node}") @@ -408,8 +379,7 @@ class LocalRunner(RunnerBase): for k6_instance in self.k6_instances: k6_instance.stop() - self.cluster_state_controller.start_stopped_storage_services() - self.cluster_state_controller.start_stopped_s3_gates() + self.cluster_state_controller.start_all_stopped_services() def get_results(self) -> dict: results = {} diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 479f4dc..45c08b3 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -173,7 +173,8 @@ class ClusterStateController: @reporter.step_deco("Wait for S3Gates reconnection to local storage") def wait_s3gates(self): online_s3gates = self._get_online(S3Gate) - parallel(self.wait_s3gate, online_s3gates) + if online_s3gates: + parallel(self.wait_s3gate, online_s3gates) @wait_for_success(600, 60) def wait_tree_healthcheck(self): From 61a1b2865241374fede87a7d2fd9ae6f849c34b1 Mon Sep 17 00:00:00 2001 From: "m.malygina" Date: Tue, 14 Nov 2023 14:00:08 +0300 Subject: [PATCH 079/274] s3local.js scenario Signed-off-by: m.malygina --- src/frostfs_testlib/load/__init__.py | 2 +- src/frostfs_testlib/load/load_config.py | 18 +++- src/frostfs_testlib/load/load_metrics.py | 9 ++ src/frostfs_testlib/load/load_report.py | 1 + src/frostfs_testlib/load/runners.py | 126 +++++++++++++++++++++++ 5 files changed, 150 insertions(+), 6 deletions(-) diff --git a/src/frostfs_testlib/load/__init__.py b/src/frostfs_testlib/load/__init__.py index 74b710f..ca2f120 100644 --- 
a/src/frostfs_testlib/load/__init__.py +++ b/src/frostfs_testlib/load/__init__.py @@ -11,4 +11,4 @@ from frostfs_testlib.load.load_config import ( ) from frostfs_testlib.load.load_report import LoadReport from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader -from frostfs_testlib.load.runners import DefaultRunner, LocalRunner +from frostfs_testlib.load.runners import DefaultRunner, LocalRunner, S3LocalRunner diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index a5d8535..735d8ec 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -20,6 +20,7 @@ class LoadScenario(Enum): S3 = "s3" S3_CAR = "s3_car" S3_MULTIPART = "s3_multipart" + S3_LOCAL = "s3local" HTTP = "http" VERIFY = "verify" LOCAL = "local" @@ -38,11 +39,12 @@ all_load_scenarios = [ LoadScenario.S3_CAR, LoadScenario.gRPC_CAR, LoadScenario.LOCAL, - LoadScenario.S3_MULTIPART + LoadScenario.S3_MULTIPART, + LoadScenario.S3_LOCAL ] all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY] -constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP, LoadScenario.LOCAL, LoadScenario.S3_MULTIPART] +constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP, LoadScenario.LOCAL, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL] constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR] grpc_preset_scenarios = [ @@ -51,7 +53,7 @@ grpc_preset_scenarios = [ LoadScenario.gRPC_CAR, LoadScenario.LOCAL, ] -s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART] +s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL] @dataclass @@ -172,9 +174,13 @@ class LoadParams: preset: Optional[Preset] = None # K6 download url k6_url: Optional[str] = None + # Requests module url + requests_module_url: Optional[str] = None + # aws cli download url + awscli_url: Optional[str] = None # No ssl verification flag no_verify_ssl: Optional[bool] = metadata_field( - [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.VERIFY, LoadScenario.HTTP], + [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL, LoadScenario.VERIFY, LoadScenario.HTTP], "no-verify-ssl", "NO_VERIFY_SSL", False, @@ -283,7 +289,9 @@ class LoadParams: # ------- LOCAL SCENARIO PARAMS ------- # Config file location (filled automatically) - config_file: Optional[str] = metadata_field([LoadScenario.LOCAL], None, "CONFIG_FILE", False) + config_file: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_FILE", False) + # Config directory location (filled automatically) + config_dir: Optional[str] = metadata_field([LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False) def set_id(self, load_id): self.load_id = load_id diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py index 474a96b..3f175cf 100644 --- a/src/frostfs_testlib/load/load_metrics.py +++ b/src/frostfs_testlib/load/load_metrics.py @@ -165,6 +165,14 @@ class S3Metrics(MetricsBase): _DELETE_ERRORS = "aws_obj_delete_fails" _DELETE_LATENCY = "aws_obj_delete_duration" +class S3LocalMetrics(MetricsBase): + _WRITE_SUCCESS = "s3local_obj_put_total" + _WRITE_ERRORS = "s3local_obj_put_fails" + _WRITE_LATENCY = "s3local_obj_put_duration" + + _READ_SUCCESS = "s3local_obj_get_total" + _READ_ERRORS = "s3local_obj_get_fails" + _READ_LATENCY = "s3local_obj_get_duration" 
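Since S3LocalMetrics reuses the MetricsBase naming scheme, it resolves through the same factory as the other scenarios; a minimal sketch, with the summary dict standing in for a parsed k6 summary (its exact shape is assumed):

    # Sketch: route an s3local run summary to its metrics wrapper.
    summary = {"metrics": {}}
    metrics = get_metrics_object(LoadScenario.S3_LOCAL, summary)
    assert isinstance(metrics, S3LocalMetrics)
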
class LocalMetrics(MetricsBase): _WRITE_SUCCESS = "local_obj_put_total" @@ -197,6 +205,7 @@ def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> Metr LoadScenario.S3: S3Metrics, LoadScenario.S3_CAR: S3Metrics, LoadScenario.S3_MULTIPART: S3Metrics, + LoadScenario.S3_LOCAL: S3LocalMetrics, LoadScenario.VERIFY: VerifyMetrics, LoadScenario.LOCAL: LocalMetrics, } diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index b648bc2..ad3a26d 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -97,6 +97,7 @@ class LoadReport: LoadScenario.gRPC_CAR: "open model", LoadScenario.S3_CAR: "open model", LoadScenario.LOCAL: "local fill", + LoadScenario.S3_LOCAL: "local fill" } return model_map[self.load_params.scenario] diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index 583c8e6..982cfcc 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -390,3 +390,129 @@ class LocalRunner(RunnerBase): parallel(self.restore_passwd_on_node, self.nodes_under_load) return results + +class S3LocalRunner(LocalRunner): + endpoints: list[str] + k6_dir: str + + @reporter.step_deco("Run preset on loaders") + def preset(self): + LocalRunner.preset(self) + with reporter.step(f"Resolve containers in preset"): + parallel(self._resolve_containers_in_preset, self.k6_instances) + + @reporter.step_deco("Resolve containers in preset") + def _resolve_containers_in_preset(self, k6_instance: K6): + k6_instance.shell.exec( + f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}") + + @reporter.step_deco("Init k6 instances") + def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): + self.k6_instances = [] + futures = parallel( + self._init_k6_instance_, + self.loaders, + load_params, + endpoints, + k6_dir, + ) + self.k6_instances = [future.result() for future in futures] + + def _init_k6_instance_(self, loader: Loader, load_params: LoadParams, endpoints: list[str], k6_dir: str): + shell = loader.get_shell() + with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"): + with reporter.step(f"Make working directory"): + shell.exec(f"sudo mkdir -p {load_params.working_dir}") + # If we chmod /home/ folder we can no longer ssh to the node + # !! IMPORTANT !! 
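+            # (sshd refuses public key logins into a world-writable home directory, so /home/$USER itself must keep its default permissions)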
+            if (
+                load_params.working_dir
+                and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}"
+                and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/"
+            ):
+                shell.exec(f"sudo chmod -R 777 {load_params.working_dir}")
+
+            return K6(
+                load_params,
+                self.endpoints,
+                k6_dir,
+                shell,
+                loader,
+                self.wallet,
+            )
+
+    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
+    @reporter.step_deco("Preparation steps")
+    def prepare(
+        self,
+        load_params: LoadParams,
+        cluster_nodes: list[ClusterNode],
+        nodes_under_load: list[ClusterNode],
+        k6_dir: str,
+    ):
+        self.k6_dir = k6_dir
+        with reporter.step("Init s3 client on loaders"):
+            storage_node = nodes_under_load[0].service(StorageNode)
+            s3_public_keys = [
+                node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes
+            ]
+            grpc_peer = storage_node.get_rpc_endpoint()
+
+        parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, s3_public_keys, grpc_peer)
+
+    @reporter.step_deco("Prepare node {cluster_node}")
+    def prepare_node(
+        self,
+        cluster_node: ClusterNode,
+        k6_dir: str,
+        load_params: LoadParams,
+        s3_public_keys: list[str],
+        grpc_peer: str,
+    ):
+        LocalRunner.prepare_node(self, cluster_node, k6_dir, load_params)
+        self.endpoints = cluster_node.s3_gate.get_all_endpoints()
+        shell = cluster_node.host.get_shell()
+
+        with reporter.step("Uninstall previous installation of aws cli"):
+            shell.exec(f"sudo rm -rf /usr/local/aws-cli")
+            shell.exec(f"sudo rm -rf /usr/local/bin/aws")
+            shell.exec(f"sudo rm -rf /usr/local/bin/aws_completer")
+
+        with reporter.step("Install aws cli"):
+            shell.exec(f"sudo curl {load_params.awscli_url} -o {k6_dir}/awscliv2.zip")
+            shell.exec(f"sudo unzip -q {k6_dir}/awscliv2.zip -d {k6_dir}")
+            shell.exec(f"sudo {k6_dir}/aws/install")
+
+        with reporter.step("Install requests python module"):
+            shell.exec(f"sudo apt-get -y install python3-pip")
+            shell.exec(f"sudo curl -so {k6_dir}/requests.tar.gz {load_params.requests_module_url}")
+            shell.exec(f"sudo python3 -m pip install -I {k6_dir}/requests.tar.gz")
+
+        with reporter.step(f"Init s3 client on {cluster_node.host_ip}"):
+            frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
+            issue_secret_output = frostfs_authmate_exec.secret.issue(
+                wallet=self.wallet.path,
+                peer=grpc_peer,
+                gate_public_key=s3_public_keys,
+                container_placement_policy=load_params.preset.container_placement_policy,
+                container_policy=f"{k6_dir}/scenarios/files/policy.json",
+                wallet_password=self.wallet.password,
+            ).stdout
+            aws_access_key_id = str(
+                re.search(
+                    r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output
+                ).group("aws_access_key_id")
+            )
+            aws_secret_access_key = str(
+                re.search(
+                    r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)",
+                    issue_secret_output,
+                ).group("aws_secret_access_key")
+            )
+            configure_input = [
+                InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
+                InteractiveInput(
+                    prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key
+                ),
+                InteractiveInput(prompt_pattern=r".*", input=""),
+                InteractiveInput(prompt_pattern=r".*", input=""),
+            ]
+            shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
\ No newline at end of file

From 22647c6d594d3b914676600a85b9d1632c4e426b Mon Sep 17 00:00:00 2001
From: mkadilov
Date: Wed, 15 Nov 2023 13:08:58 +0300
Subject: [PATCH 080/274] [#119] Renamed Github to Gitea in links

Some links changed from github to git.frostfs

Signed-off-by: Mikhail Kadilov
---
 CONTRIBUTING.md                              | 14 +++++++-------
 README.md                                    |  2 +-
pyproject.toml | 2 +- src/frostfs_testlib/cli/frostfs_adm/morph.py | 4 ++-- src/frostfs_testlib/cli/frostfs_cli/acl.py | 2 +- src/frostfs_testlib/steps/node_management.py | 2 +- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fdcaec7..69417d2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,8 +3,8 @@ First, thank you for contributing! We love and encourage pull requests from everyone. Please follow the guidelines: -- Check the open [issues](https://github.com/TrueCloudLab/frostfs-testlib/issues) and - [pull requests](https://github.com/TrueCloudLab/frostfs-testlib/pulls) for existing +- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/issues) and + [pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/pulls) for existing discussions. - Open an issue first, to discuss a new feature or enhancement. @@ -26,8 +26,8 @@ Start by forking the `frostfs-testlib` repository, make changes in a branch and send a pull request. We encourage pull requests to discuss code changes. Here are the steps in details: -### Set up your GitHub Repository -Fork [FrostFS testlib upstream](https://github.com/TrueCloudLab/frostfs-testlib/fork) source +### Set up your Git Repository +Fork [FrostFS testlib upstream](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/forks) source repository to your own personal repository. Copy the URL of your fork and clone it: ```shell @@ -37,7 +37,7 @@ $ git clone ### Set up git remote as ``upstream`` ```shell $ cd frostfs-testlib -$ git remote add upstream https://github.com/TrueCloudLab/frostfs-testlib +$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-testlib $ git fetch upstream ``` @@ -99,8 +99,8 @@ $ git push origin feature/123-something_awesome ``` ### Create a Pull Request -Pull requests can be created via GitHub. Refer to [this -document](https://help.github.com/articles/creating-a-pull-request/) for +Pull requests can be created via Git. Refer to [this +document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for detailed steps on how to create a pull request. After a Pull Request gets peer reviewed and approved, it will be merged. diff --git a/README.md b/README.md index c194df9..2f8751f 100644 --- a/README.md +++ b/README.md @@ -92,4 +92,4 @@ The library provides the following primary components: ## Contributing -Any contributions to the library should conform to the [contribution guideline](https://github.com/TrueCloudLab/frostfs-testlib/blob/master/CONTRIBUTING.md). +Any contributions to the library should conform to the [contribution guideline](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/src/branch/master/CONTRIBUTING.md). 
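The pyproject.toml touched below also declares the [project.entry-points] tables through which testlib discovers plugins at runtime; for orientation, a minimal sketch of the consuming side (the group name comes from the state manager patch earlier in this series, the printed name is illustrative):

    # Sketch, not part of this patch: enumerate plugin classes registered
    # under the state-manager entry-point group used by ClusterStateController.
    from frostfs_testlib.plugins import load_all

    for manager_class in load_all(group="frostfs.testlib.csc_managers"):
        print(f"Discovered state manager: {manager_class.__name__}")
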
diff --git a/pyproject.toml b/pyproject.toml index 48cc418..34a37e3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,7 @@ requires-python = ">=3.10" dev = ["black", "bumpver", "isort", "pre-commit"] [project.urls] -Homepage = "https://github.com/TrueCloudLab/frostfs-testlib" +Homepage = "https://git.frostfs.info/TrueCloudLab/frostfs-testlib" [project.entry-points."frostfs.testlib.reporter"] allure = "frostfs_testlib.reporter.allure_handler:AllureHandler" diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index 3faa875..a1693ac 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -219,7 +219,7 @@ class FrostfsAdmMorph(CliCommand): container_alias_fee: Container alias fee (default 500). container_fee: Container registration fee (default 1000). contracts: Path to archive with compiled FrostFS contracts - (default fetched from latest github release). + (default fetched from latest git release). epoch_duration: Amount of side chain blocks in one FrostFS epoch (default 240). homomorphic_disabled: Disable object homomorphic hashing. local_dump: Path to the blocks dump file. @@ -340,7 +340,7 @@ class FrostfsAdmMorph(CliCommand): Args: alphabet_wallets: Path to alphabet wallets dir. contracts: Path to archive with compiled FrostFS contracts - (default fetched from latest github release). + (default fetched from latest git release). rpc_endpoint: N3 RPC node endpoint. Returns: diff --git a/src/frostfs_testlib/cli/frostfs_cli/acl.py b/src/frostfs_testlib/cli/frostfs_cli/acl.py index bd0f80e..3e60582 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/acl.py +++ b/src/frostfs_testlib/cli/frostfs_cli/acl.py @@ -22,7 +22,7 @@ class FrostfsCliACL(CliCommand): Well-known system object headers start with '$Object:' prefix. User defined headers start without prefix. Read more about filter keys at: - http://github.com/TrueCloudLab/frostfs-api/blob/master/proto-docs/acl.md#message-eaclrecordfilter + https://git.frostfs.info/TrueCloudLab/frostfs-api/src/branch/master/proto-docs/acl.md#message-eaclrecord-filter Match is '=' for matching and '!=' for non-matching filter. Value is a valid unicode string corresponding to object or request header value. diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py index 9c0c6b0..d91721c 100644 --- a/src/frostfs_testlib/steps/node_management.py +++ b/src/frostfs_testlib/steps/node_management.py @@ -169,7 +169,7 @@ def include_node_to_network_map( storage_node_set_status(node_to_include, status="online") # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch. - # First sleep can be omitted after https://github.com/TrueCloudLab/frostfs-node/issues/60 complete. + # First sleep can be omitted after https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/60 complete. 
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2) tick_epoch(shell, cluster) From ed70dada963229cfdd2033df4b1dc9ec556f56bf Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 20 Nov 2023 13:54:47 +0300 Subject: [PATCH 081/274] Add support test maintenance Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 96 +++++++------------ src/frostfs_testlib/cli/frostfs_cli/cli.py | 3 + .../cli/frostfs_cli/control.py | 58 +++++++++++ src/frostfs_testlib/cli/netmap_parser.py | 86 +++++++++++++++++ src/frostfs_testlib/shell/local_shell.py | 7 +- src/frostfs_testlib/shell/ssh_shell.py | 33 ++----- .../controllers/cluster_state_controller.py | 78 +++++++++++++++ .../dataclasses/storage_object_info.py | 25 ++++- 8 files changed, 290 insertions(+), 96 deletions(-) create mode 100644 src/frostfs_testlib/cli/frostfs_cli/control.py create mode 100644 src/frostfs_testlib/cli/netmap_parser.py diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index a1693ac..1d753d9 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -27,11 +27,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph deposit-notary", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def dump_balances( @@ -56,11 +52,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph dump-balances", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def dump_config(self, rpc_endpoint: str) -> CommandResult: @@ -74,11 +66,25 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph dump-config", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, + ) + + def set_config( + self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None + ) -> CommandResult: + """Add/update global config value in the FrostFS network. + + Args: + set_key_value: key1=val1 [key2=val2 ...] + alphabet_wallets: Path to alphabet wallets dir + rpc_endpoint: N3 RPC node endpoint + + Returns: + Command's result. 
+ """ + return self._execute( + f"morph set-config {set_key_value}", + **{param: param_value for param, param_value in locals().items() if param not in ["self", "set_key_value"]}, ) def dump_containers( @@ -101,11 +107,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph dump-containers", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def dump_hashes(self, rpc_endpoint: str) -> CommandResult: @@ -119,11 +121,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph dump-hashes", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def force_new_epoch( @@ -140,11 +138,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph force-new-epoch", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def generate_alphabet( @@ -165,11 +159,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph generate-alphabet", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def generate_storage_wallet( @@ -192,11 +182,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph generate-storage-wallet", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def init( @@ -232,11 +218,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph init", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def refill_gas( @@ -259,11 +241,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph refill-gas", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def restore_containers( @@ -286,11 +264,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph restore-containers", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def set_policy( @@ -348,17 +322,13 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph update-contracts", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def remove_nodes( self, node_netmap_keys: list[str], rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None ) -> CommandResult: - """ Move node to the Offline state in the candidates list + """Move node to the Offline state in the candidates list and tick an epoch to update the netmap using frostfs-adm Args: @@ -371,7 +341,7 @@ class FrostfsAdmMorph(CliCommand): """ if not 
len(node_netmap_keys): raise AttributeError("Got empty node_netmap_keys list") - + return self._execute( f"morph remove-nodes {' '.join(node_netmap_keys)}", **{ @@ -379,4 +349,4 @@ class FrostfsAdmMorph(CliCommand): for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"] }, - ) \ No newline at end of file + ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/cli.py b/src/frostfs_testlib/cli/frostfs_cli/cli.py index a78da8b..c20a987 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/cli.py +++ b/src/frostfs_testlib/cli/frostfs_cli/cli.py @@ -3,6 +3,7 @@ from typing import Optional from frostfs_testlib.cli.frostfs_cli.accounting import FrostfsCliAccounting from frostfs_testlib.cli.frostfs_cli.acl import FrostfsCliACL from frostfs_testlib.cli.frostfs_cli.container import FrostfsCliContainer +from frostfs_testlib.cli.frostfs_cli.control import FrostfsCliControl from frostfs_testlib.cli.frostfs_cli.netmap import FrostfsCliNetmap from frostfs_testlib.cli.frostfs_cli.object import FrostfsCliObject from frostfs_testlib.cli.frostfs_cli.session import FrostfsCliSession @@ -25,6 +26,7 @@ class FrostfsCli: storagegroup: FrostfsCliStorageGroup util: FrostfsCliUtil version: FrostfsCliVersion + control: FrostfsCliControl def __init__(self, shell: Shell, frostfs_cli_exec_path: str, config_file: Optional[str] = None): self.accounting = FrostfsCliAccounting(shell, frostfs_cli_exec_path, config=config_file) @@ -38,3 +40,4 @@ class FrostfsCli: self.util = FrostfsCliUtil(shell, frostfs_cli_exec_path, config=config_file) self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file) self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file) + self.control = FrostfsCliControl(shell, frostfs_cli_exec_path, config=config_file) diff --git a/src/frostfs_testlib/cli/frostfs_cli/control.py b/src/frostfs_testlib/cli/frostfs_cli/control.py new file mode 100644 index 0000000..bfcd6ec --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_cli/control.py @@ -0,0 +1,58 @@ +from typing import Optional + +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult + + +class FrostfsCliControl(CliCommand): + def set_status( + self, + endpoint: str, + status: str, + wallet: Optional[str] = None, + force: Optional[bool] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Set status of the storage node in FrostFS network map + + Args: + wallet: Path to the wallet or binary key + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + force: Force turning to local maintenance + status: New netmap status keyword ('online', 'offline', 'maintenance') + timeout: Timeout for an operation (default 15s) + + Returns: + Command`s result. 
+ """ + return self._execute( + "control set-status", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def healthcheck( + self, + endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Set status of the storage node in FrostFS network map + + Args: + wallet: Path to the wallet or binary key + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + force: Force turning to local maintenance + status: New netmap status keyword ('online', 'offline', 'maintenance') + timeout: Timeout for an operation (default 15s) + + Returns: + Command`s result. + """ + return self._execute( + "control healthcheck", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py new file mode 100644 index 0000000..6d2eaaa --- /dev/null +++ b/src/frostfs_testlib/cli/netmap_parser.py @@ -0,0 +1,86 @@ +import re + +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo + + +class NetmapParser: + @staticmethod + def netinfo(output: str) -> NodeNetInfo: + regexes = { + "epoch": r"Epoch: (?P\d+)", + "network_magic": r"Network magic: (?P.*$)", + "time_per_block": r"Time per block: (?P\d+\w+)", + "container_fee": r"Container fee: (?P\d+)", + "epoch_duration": r"Epoch duration: (?P\d+)", + "inner_ring_candidate_fee": r"Inner Ring candidate fee: (?P\d+)", + "maximum_object_size": r"Maximum object size: (?P\d+)", + "withdrawal_fee": r"Withdrawal fee: (?P\d+)", + "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?Ptrue|false)", + "maintenance_mode_allowed": r"Maintenance mode allowed: (?Ptrue|false)", + "eigen_trust_alpha": r"EigenTrustAlpha: (?P\d+\w+$)", + "eigen_trust_iterations": r"EigenTrustIterations: (?P\d+)", + } + parse_result = {} + + for key, regex in regexes.items(): + search_result = re.search(regex, output, flags=re.MULTILINE) + if search_result == None: + parse_result[key] = None + continue + parse_result[key] = search_result[key].strip() + + node_netinfo = NodeNetInfo(**parse_result) + + return node_netinfo + + @staticmethod + def snapshot_all_nodes(output: str) -> list[NodeNetmapInfo]: + """The code will parse each line and return each node as dataclass.""" + netmap_nodes = output.split("Node ")[1:] + dataclasses_netmap = [] + result_netmap = {} + + regexes = { + "node_id": r"\d+: (?P\w+)", + "node_data_ips": r"(?P/ip4/.+?)$", + "node_status": r"(?PONLINE|OFFLINE)", + "cluster_name": r"ClusterName: (?P\w+)", + "continent": r"Continent: (?P\w+)", + "country": r"Country: (?P\w+)", + "country_code": r"CountryCode: (?P\w+)", + "external_address": r"ExternalAddr: (?P/ip[4].+?)$", + "location": r"Location: (?P\w+.*)", + "node": r"Node: (?P\d+\.\d+\.\d+\.\d+)", + "price": r"Price: (?P\d+)", + "sub_div": r"SubDiv: (?P.*)", + "sub_div_code": r"SubDivCode: (?P\w+)", + "un_locode": r"UN-LOCODE: (?P\w+.*)", + "role": r"role: (?P\w+)", + } + + for node in netmap_nodes: + for key, regex in regexes.items(): + search_result = re.search(regex, node, flags=re.MULTILINE) + if key == "node_data_ips": + result_netmap[key] = search_result[key].strip().split(" ") + continue + if key == "external_address": + result_netmap[key] = search_result[key].strip().split(",") + continue + if search_result == None: + result_netmap[key] = None + 
continue + result_netmap[key] = search_result[key].strip() + + dataclasses_netmap.append(NodeNetmapInfo(**result_netmap)) + + return dataclasses_netmap + + @staticmethod + def snapshot_one_node(output: str, cluster_node: ClusterNode) -> NodeNetmapInfo | None: + snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output) + snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.host_ip] + if not snapshot_node: + return None + return snapshot_node[0] diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index fa07890..26c7e9b 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -62,7 +62,8 @@ class LocalShell(Shell): if options.check and result.return_code != 0: raise RuntimeError( f"Command: {command}\nreturn code: {result.return_code}\n" - f"Output: {result.stdout}" + f"Output: {result.stdout}\n" + f"Stderr: {result.stderr}\n" ) return result @@ -94,9 +95,7 @@ class LocalShell(Shell): return_code=exc.returncode, ) raise RuntimeError( - f"Command: {command}\nError:\n" - f"return code: {exc.returncode}\n" - f"output: {exc.output}" + f"Command: {command}\nError:\n" f"return code: {exc.returncode}\n" f"output: {exc.output}" ) from exc except OSError as exc: raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index 6db7d51..6b12f81 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -6,27 +6,11 @@ from functools import lru_cache, wraps from time import sleep from typing import ClassVar, Optional, Tuple -from paramiko import ( - AutoAddPolicy, - Channel, - ECDSAKey, - Ed25519Key, - PKey, - RSAKey, - SSHClient, - SSHException, - ssh_exception, -) +from paramiko import AutoAddPolicy, Channel, ECDSAKey, Ed25519Key, PKey, RSAKey, SSHClient, SSHException, ssh_exception from paramiko.ssh_exception import AuthenticationException from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.shell.interfaces import ( - CommandInspector, - CommandOptions, - CommandResult, - Shell, - SshCredentials, -) +from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell, SshCredentials logger = logging.getLogger("frostfs.testlib.shell") reporter = get_reporter() @@ -97,8 +81,7 @@ class SshConnectionProvider: ) else: logger.info( - f"Trying to connect to host {host} as {creds.ssh_login} using password " - f"(attempt {attempt})" + f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})" ) connection.connect( hostname=host, @@ -141,9 +124,7 @@ class HostIsNotAvailable(Exception): def log_command(func): @wraps(func) - def wrapper( - shell: "SSHShell", command: str, options: CommandOptions, *args, **kwargs - ) -> CommandResult: + def wrapper(shell: "SSHShell", command: str, options: CommandOptions, *args, **kwargs) -> CommandResult: command_info = command.removeprefix("$ProgressPreference='SilentlyContinue'\n") with reporter.step(command_info): logger.info(f'Execute command "{command}" on "{shell.host}"') @@ -238,15 +219,13 @@ class SSHShell(Shell): if options.check and result.return_code != 0: raise RuntimeError( - f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}" + f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n" ) return result @log_command def 
_exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: - stdin, stdout, stderr = self._connection.exec_command( - command, timeout=options.timeout, get_pty=True - ) + stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, get_pty=True) for interactive_input in options.interactive_inputs: input = interactive_input.input if not input.endswith("\n"): diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 45c08b3..27fa034 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -3,9 +3,13 @@ import time from typing import TypeVar import frostfs_testlib.resources.optionals as optionals +from frostfs_testlib.cli import FrostfsAdm, FrostfsCli +from frostfs_testlib.cli.netmap_parser import NetmapParser from frostfs_testlib.healthcheck.interfaces import Healthcheck from frostfs_testlib.plugins import load_all from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC +from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, MORPH_BLOCK_TIME from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider from frostfs_testlib.steps.network import IfUpDownHelper, IpTablesHelper from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode @@ -13,6 +17,7 @@ from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.testing import parallel from frostfs_testlib.testing.test_control import run_optionally, wait_for_success +from frostfs_testlib.utils.datetime_utils import parse_time from frostfs_testlib.utils.failover_utils import ( wait_all_storage_nodes_returned, wait_for_host_offline, @@ -426,6 +431,79 @@ class ClusterStateController: return parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes) + @reporter.step_deco("Set MaintenanceModeAllowed - {status}") + def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: + frostfs_adm = FrostfsAdm( + shell=cluster_node.host.get_shell(), + frostfs_adm_exec_path=FROSTFS_ADM_EXEC, + config_file=FROSTFS_ADM_CONFIG_PATH, + ) + frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}") + + @reporter.step_deco("Set mode node to {status}") + def set_mode_node(self, cluster_node: ClusterNode, wallet: str, status: str, await_tick: bool = True) -> None: + rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint() + control_endpoint = cluster_node.service(StorageNode).get_control_endpoint() + + frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(local_shell=self.shell, cluster_node=cluster_node) + node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint=rpc_endpoint, wallet=wallet).stdout) + + with reporter.step("If status maintenance, then check that the option is enabled"): + if node_netinfo.maintenance_mode_allowed == "false": + frostfs_adm.morph.set_config(set_key_value="MaintenanceModeAllowed=true") + + with reporter.step(f"Change the status to {status}"): + frostfs_cli_remote.control.set_status(endpoint=control_endpoint, status=status) + + if not await_tick: + return + + with reporter.step("Tick 1 epoch, and await 2 block"): + 
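+            # Assumption: one forced epoch plus two morph blocks is enough for the new status to land in the netmap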
frostfs_adm.morph.force_new_epoch()
+            time.sleep(parse_time(MORPH_BLOCK_TIME) * 2)
+
+        self.check_node_status(status=status, wallet=wallet, cluster_node=cluster_node)
+
+    @wait_for_success(80, 8)
+    @reporter.step_deco("Check that node status is {status}")
+    def check_node_status(self, status: str, wallet: str, cluster_node: ClusterNode):
+        frostfs_cli = FrostfsCli(
+            shell=self.shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
+        )
+        netmap = NetmapParser.snapshot_all_nodes(
+            frostfs_cli.netmap.snapshot(rpc_endpoint=cluster_node.storage_node.get_rpc_endpoint(), wallet=wallet).stdout
+        )
+        netmap = [node for node in netmap if cluster_node.host_ip == node.node]
+        if status == "offline":
+            assert not netmap, f"{cluster_node.host_ip} is expected to be offline (absent from the netmap)"
+        else:
+            assert netmap[0].node_status == status.upper(), f"Node status {netmap[0].node_status} != expected {status}"
+
+    def _get_cli(self, local_shell: Shell, cluster_node: ClusterNode) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]:
+        # TODO Move to service config
+        host = cluster_node.host
+        service_config = host.get_service_config(cluster_node.storage_node.name)
+        wallet_path = service_config.attributes["wallet_path"]
+        wallet_password = service_config.attributes["wallet_password"]
+
+        shell = host.get_shell()
+        wallet_config_path = f"/tmp/{cluster_node.storage_node.name}-config.yaml"
+        wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
+        shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
+
+        frostfs_adm = FrostfsAdm(
+            shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH
+        )
+        frostfs_cli = FrostfsCli(
+            shell=local_shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
+        )
+        frostfs_cli_remote = FrostfsCli(
+            shell=shell,
+            frostfs_cli_exec_path=FROSTFS_CLI_EXEC,
+            config_file=wallet_config_path,
+        )
+        return frostfs_adm, frostfs_cli, frostfs_cli_remote
+
     def _enable_date_synchronizer(self, cluster_node: ClusterNode):
         shell = cluster_node.host.get_shell()
         shell.exec("timedatectl set-ntp true")
diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py
index d670d8e..63a3cf2 100644
--- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py
+++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py
@@ -1,5 +1,4 @@
 from dataclasses import dataclass
-from enum import Enum
 from typing import Optional
 
 from frostfs_testlib.testing.readable import HumanReadableEnum
@@ -28,10 +27,16 @@ class StorageObjectInfo(ObjectRef):
     locks: Optional[list[LockObjectInfo]] = None
 
 
+class ModeNode(HumanReadableEnum):
+    MAINTENANCE: str = "maintenance"
+    ONLINE: str = "online"
+    OFFLINE: str = "offline"
+
+
 @dataclass
 class NodeNetmapInfo:
     node_id: str = None
-    node_status: str = None
+    node_status: ModeNode = None
     node_data_ips: list[str] = None
     cluster_name: str = None
     continent: str = None
@@ -53,3 +58,19 @@ class Interfaces(HumanReadableEnum):
     MGMT: str = "mgmt"
     INTERNAL_0: str = "internal0"
     INTERNAL_1: str = "internal1"
+
+
+@dataclass
+class NodeNetInfo:
+    epoch: str = None
+    network_magic: str = None
+    time_per_block: str = None
+    container_fee: str = None
+    epoch_duration: str = None
+    inner_ring_candidate_fee: str = None
+    maximum_object_size: str = None
+    withdrawal_fee: str = None
+    homomorphic_hashing_disabled: str = None
+    maintenance_mode_allowed: str = None
+    eigen_trust_alpha: str = None
+
eigen_trust_iterations: str = None From ed8f90dfc02e30ae6bf1be124b3f346ab2bce50f Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 20 Nov 2023 15:53:30 +0300 Subject: [PATCH 082/274] Change output time format to unix Signed-off-by: Dmitriy Zayakin --- .../storage/controllers/cluster_state_controller.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 27fa034..825f2ac 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -410,8 +410,8 @@ class ClusterStateController: @reporter.step_deco("Set node time to {in_date}") def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: shell = node.host.get_shell() - shell.exec(f"hwclock --set --date='{in_date}'") - shell.exec("hwclock --hctosys") + shell.exec(f"date -s @{time.mktime(in_date.timetuple())}") + shell.exec("hwclock --systohc") node_time = self.get_node_date(node) with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1) @@ -421,8 +421,8 @@ class ClusterStateController: shell = node.host.get_shell() now_time = datetime.datetime.now(datetime.timezone.utc) with reporter.step(f"Set {now_time} time"): - shell.exec(f"hwclock --set --date='{now_time}'") - shell.exec("hwclock --hctosys") + shell.exec(f"date -s @{time.mktime(now_time.timetuple())}") + shell.exec("hwclock --systohc") @reporter.step_deco("Change the synchronizer status to {status}") def set_sync_date_all_nodes(self, status: str): From 9ab4def44f5b9a81f960cce2d1a3b46664b5ecb6 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 20 Nov 2023 17:39:15 +0300 Subject: [PATCH 083/274] Store k6 output and add socket info collection Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/__init__.py | 3 +- src/frostfs_testlib/load/interfaces/loader.py | 14 +++++ .../scenario_runner.py} | 18 ++----- src/frostfs_testlib/load/k6.py | 54 +++++++------------ src/frostfs_testlib/load/loaders.py | 2 +- src/frostfs_testlib/load/runners.py | 53 +++++++++--------- .../controllers/background_load_controller.py | 26 +++------ .../controllers/cluster_state_controller.py | 10 ++-- src/frostfs_testlib/utils/failover_utils.py | 29 ++++++---- 9 files changed, 99 insertions(+), 110 deletions(-) create mode 100644 src/frostfs_testlib/load/interfaces/loader.py rename src/frostfs_testlib/load/{interfaces.py => interfaces/scenario_runner.py} (79%) diff --git a/src/frostfs_testlib/load/__init__.py b/src/frostfs_testlib/load/__init__.py index ca2f120..8477ee4 100644 --- a/src/frostfs_testlib/load/__init__.py +++ b/src/frostfs_testlib/load/__init__.py @@ -1,4 +1,5 @@ -from frostfs_testlib.load.interfaces import Loader, ScenarioRunner +from frostfs_testlib.load.interfaces.loader import Loader +from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner from frostfs_testlib.load.load_config import ( EndpointSelectionStrategy, K6ProcessAllocationStrategy, diff --git a/src/frostfs_testlib/load/interfaces/loader.py b/src/frostfs_testlib/load/interfaces/loader.py new file mode 100644 index 0000000..2c818d9 --- /dev/null +++ b/src/frostfs_testlib/load/interfaces/loader.py @@ -0,0 +1,14 @@ +from abc import ABC, abstractmethod + +from frostfs_testlib.shell.interfaces import Shell + + +class 
Loader(ABC): + @abstractmethod + def get_shell(self) -> Shell: + """Get shell for the loader""" + + @property + @abstractmethod + def ip(self): + """Get address of the loader""" diff --git a/src/frostfs_testlib/load/interfaces.py b/src/frostfs_testlib/load/interfaces/scenario_runner.py similarity index 79% rename from src/frostfs_testlib/load/interfaces.py rename to src/frostfs_testlib/load/interfaces/scenario_runner.py index 394fff7..45c1317 100644 --- a/src/frostfs_testlib/load/interfaces.py +++ b/src/frostfs_testlib/load/interfaces/scenario_runner.py @@ -1,20 +1,8 @@ from abc import ABC, abstractmethod +from frostfs_testlib.load.k6 import K6 from frostfs_testlib.load.load_config import LoadParams -from frostfs_testlib.shell.interfaces import Shell from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo - - -class Loader(ABC): - @abstractmethod - def get_shell(self) -> Shell: - """Get shell for the loader""" - - @property - @abstractmethod - def ip(self): - """Get address of the loader""" class ScenarioRunner(ABC): @@ -32,6 +20,10 @@ class ScenarioRunner(ABC): def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): """Init K6 instances""" + @abstractmethod + def get_k6_instances(self) -> list[K6]: + """Get K6 instances""" + @abstractmethod def start(self): """Start K6 instances""" diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index e46221e..3dedd53 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -8,13 +8,8 @@ from time import sleep from typing import Any from urllib.parse import urlparse -from frostfs_testlib.load.interfaces import Loader -from frostfs_testlib.load.load_config import ( - K6ProcessAllocationStrategy, - LoadParams, - LoadScenario, - LoadType, -) +from frostfs_testlib.load.interfaces.loader import Loader +from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType from frostfs_testlib.processes.remote_process import RemoteProcess from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.common import STORAGE_USER_NAME @@ -59,6 +54,7 @@ class K6: self.loader: Loader = loader self.shell: Shell = shell self.wallet = wallet + self.preset_output: str = "" self.summary_json: str = os.path.join( self.load_params.working_dir, f"{self.load_params.load_id}_{self.load_params.scenario.value}_summary.json", @@ -101,10 +97,10 @@ class K6: command = " ".join(command_args) result = self.shell.exec(command) - assert ( - result.return_code == EXIT_RESULT_CODE - ), f"Return code of preset is not zero: {result.stdout}" - return result.stdout.strip("\n") + assert result.return_code == EXIT_RESULT_CODE, f"Return code of preset is not zero: {result.stdout}" + + self.preset_output = result.stdout.strip("\n") + return self.preset_output @reporter.step_deco("Generate K6 command") def _generate_env_variables(self) -> str: @@ -113,31 +109,21 @@ class K6: env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints) env_vars["SUMMARY_JSON"] = self.summary_json - reporter.attach( - "\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables" - ) - return " ".join( - [f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None] - ) + reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables") + return " ".join([f"-e {param}='{value}'" for param, 
value in env_vars.items() if value is not None]) def start(self) -> None: - with reporter.step( - f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}" - ): + with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"): self._start_time = int(datetime.utcnow().timestamp()) command = ( f"{self._k6_dir}/k6 run {self._generate_env_variables()} " f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js" ) user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None - self._k6_process = RemoteProcess.create( - command, self.shell, self.load_params.working_dir, user - ) + self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, user) def wait_until_finished(self, soft_timeout: int = 0) -> None: - with reporter.step( - f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}" - ): + with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"): if self.load_params.scenario == LoadScenario.VERIFY: timeout = self.load_params.verify_time or 0 else: @@ -180,9 +166,11 @@ class K6: while timeout > 0: if not self._k6_process.running(): return - remaining_time_hours = f"{timeout//3600}h" if timeout//3600 != 0 else "" - remaining_time_minutes = f"{timeout//60%60}m" if timeout//60%60 != 0 else "" - logger.info(f"K6 is running. Remaining time {remaining_time_hours}{remaining_time_minutes}{timeout%60}s. Next check after {wait_interval} seconds...") + remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else "" + remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else "" + logger.info( + f"K6 is running. Remaining time {remaining_time_hours}{remaining_time_minutes}{timeout%60}s. Next check after {wait_interval} seconds..." 
+ ) sleep(wait_interval) timeout -= min(timeout, wait_interval) wait_interval = max( @@ -198,9 +186,7 @@ class K6: raise TimeoutError(f"Expected K6 to finish after {original_timeout} sec.") def get_results(self) -> Any: - with reporter.step( - f"Get load results from loader {self.loader.ip} on endpoints {self.endpoints}" - ): + with reporter.step(f"Get load results from loader {self.loader.ip} on endpoints {self.endpoints}"): self.__log_output() if not self.summary_json: @@ -231,9 +217,7 @@ class K6: return False @reporter.step_deco("Wait until K6 process end") - @wait_for_success( - K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout" - ) + @wait_for_success(K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout") def _wait_until_process_end(self): return self._k6_process.running() diff --git a/src/frostfs_testlib/load/loaders.py b/src/frostfs_testlib/load/loaders.py index 9e92155..1e0e97f 100644 --- a/src/frostfs_testlib/load/loaders.py +++ b/src/frostfs_testlib/load/loaders.py @@ -1,4 +1,4 @@ -from frostfs_testlib.load.interfaces import Loader +from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.resources.load_params import ( LOAD_NODE_SSH_PASSWORD, LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index 982cfcc..ea5a374 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -10,7 +10,8 @@ from urllib.parse import urlparse import yaml from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate -from frostfs_testlib.load.interfaces import Loader, ScenarioRunner +from frostfs_testlib.load.interfaces.loader import Loader +from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner from frostfs_testlib.load.k6 import K6 from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader @@ -50,6 +51,9 @@ class RunnerBase(ScenarioRunner): return any([future.result() for future in futures]) + def get_k6_instances(self): + return self.k6_instances + class DefaultRunner(RunnerBase): loaders: list[Loader] @@ -391,6 +395,7 @@ class LocalRunner(RunnerBase): return results + class S3LocalRunner(LocalRunner): endpoints: list[str] k6_dir: str @@ -404,7 +409,8 @@ class S3LocalRunner(LocalRunner): @reporter.step_deco("Resolve containers in preset") def _resolve_containers_in_preset(self, k6_instance: K6): k6_instance.shell.exec( - f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}") + f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}" + ) @reporter.step_deco("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): @@ -426,9 +432,9 @@ class S3LocalRunner(LocalRunner): # If we chmod /home/ folder we can no longer ssh to the node # !! IMPORTANT !! 
         if (
-                load_params.working_dir
-                and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}"
-                and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/"
+            load_params.working_dir
+            and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}"
+            and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/"
         ):
             shell.exec(f"sudo chmod -R 777 {load_params.working_dir}")
@@ -444,30 +450,25 @@ class S3LocalRunner(LocalRunner):
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
     @reporter.step_deco("Preparation steps")
     def prepare(
-            self,
-            load_params: LoadParams,
-            cluster_nodes: list[ClusterNode],
-            nodes_under_load: list[ClusterNode],
-            k6_dir: str,
+        self,
+        load_params: LoadParams,
+        cluster_nodes: list[ClusterNode],
+        nodes_under_load: list[ClusterNode],
+        k6_dir: str,
     ):
         self.k6_dir = k6_dir
         with reporter.step("Init s3 client on loaders"):
             storage_node = nodes_under_load[0].service(StorageNode)
-            s3_public_keys = [
-                node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes
-            ]
+            s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
             grpc_peer = storage_node.get_rpc_endpoint()

         parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, s3_public_keys, grpc_peer)

     @reporter.step_deco("Prepare node {cluster_node}")
-    def prepare_node(self,
-                     cluster_node: ClusterNode,
-                     k6_dir: str,
-                     load_params: LoadParams,
-                     s3_public_keys: list[str],
-                     grpc_peer: str):
-        LocalRunner.prepare_node(self,cluster_node, k6_dir, load_params)
+    def prepare_node(
+        self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, s3_public_keys: list[str], grpc_peer: str
+    ):
+        LocalRunner.prepare_node(self, cluster_node, k6_dir, load_params)
         self.endpoints = cluster_node.s3_gate.get_all_endpoints()
         shell = cluster_node.host.get_shell()
@@ -497,9 +498,9 @@ class S3LocalRunner(LocalRunner):
             wallet_password=self.wallet.password,
         ).stdout
         aws_access_key_id = str(
-            re.search(
-                r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output
-            ).group("aws_access_key_id")
+            re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
+                "aws_access_key_id"
+            )
         )
         aws_secret_access_key = str(
             re.search(
@@ -509,10 +510,8 @@ class S3LocalRunner(LocalRunner):
             )
         configure_input = [
             InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
-            InteractiveInput(
-                prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key
-            ),
+            InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key),
             InteractiveInput(prompt_pattern=r".*", input=""),
             InteractiveInput(prompt_pattern=r".*", input=""),
         ]
-        shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
\ No newline at end of file
+        shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py
index a18a603..8ecada8 100644
--- a/src/frostfs_testlib/storage/controllers/background_load_controller.py
+++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py
@@ -2,13 +2,8 @@ import copy
 from typing import Optional

 import frostfs_testlib.resources.optionals as optionals
-from frostfs_testlib.load.interfaces import ScenarioRunner
-from frostfs_testlib.load.load_config import (
-    EndpointSelectionStrategy,
-    LoadParams,
-    LoadScenario,
-    LoadType,
-)
+from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
+from
frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType from frostfs_testlib.load.load_report import LoadReport from frostfs_testlib.load.load_verifiers import LoadVerifier from frostfs_testlib.reporter import get_reporter @@ -56,9 +51,7 @@ class BackgroundLoadController: raise RuntimeError("endpoint_selection_strategy should not be None") @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, []) - def _get_endpoints( - self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy - ): + def _get_endpoints(self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy): all_endpoints = { LoadType.gRPC: { EndpointSelectionStrategy.ALL: list( @@ -85,10 +78,7 @@ class BackgroundLoadController: ) ), EndpointSelectionStrategy.FIRST: list( - set( - node_under_load.service(S3Gate).get_endpoint() - for node_under_load in self.nodes_under_load - ) + set(node_under_load.service(S3Gate).get_endpoint() for node_under_load in self.nodes_under_load) ), }, } @@ -98,12 +88,8 @@ class BackgroundLoadController: @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step_deco("Prepare load instances") def prepare(self): - self.endpoints = self._get_endpoints( - self.load_params.load_type, self.load_params.endpoint_selection_strategy - ) - self.runner.prepare( - self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir - ) + self.endpoints = self._get_endpoints(self.load_params.load_type, self.load_params.endpoint_selection_strategy) + self.runner.prepare(self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir) self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 825f2ac..000bdd8 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -109,12 +109,14 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start host of node {node}") - def start_node_host(self, node: ClusterNode): + def start_node_host(self, node: ClusterNode, tree_healthcheck: bool = True): with reporter.step(f"Start host {node.host.config.address}"): node.host.start_host() wait_for_host_online(self.shell, node.storage_node) + self.stopped_nodes.remove(node) wait_for_node_online(node.storage_node) - self.stopped_nodes.remove(node) + if tree_healthcheck: + self.wait_tree_healthcheck() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start stopped hosts") @@ -364,7 +366,7 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Hard reboot host {node} via magic SysRq option") - def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True): + def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True, tree_healthcheck: bool = True): shell = node.host.get_shell() shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"') @@ -381,6 +383,8 @@ class ClusterStateController: time.sleep(10) wait_for_host_online(self.shell, node.storage_node) wait_for_node_online(node.storage_node) + if tree_healthcheck: + self.wait_tree_healthcheck() @reporter.step_deco("Down {interface} to {nodes}") def down_interface(self, nodes: 
list[ClusterNode], interface: str): diff --git a/src/frostfs_testlib/utils/failover_utils.py b/src/frostfs_testlib/utils/failover_utils.py index 8c6062f..27cd181 100644 --- a/src/frostfs_testlib/utils/failover_utils.py +++ b/src/frostfs_testlib/utils/failover_utils.py @@ -12,6 +12,7 @@ from frostfs_testlib.steps.node_management import storage_node_healthcheck from frostfs_testlib.steps.storage_policy import get_nodes_with_object from frostfs_testlib.storage.cluster import Cluster, ClusterNode, NodeBase, StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain +from frostfs_testlib.testing.parallel import parallel from frostfs_testlib.testing.test_control import retry, wait_for_success from frostfs_testlib.utils.datetime_utils import parse_time @@ -26,12 +27,17 @@ def ping_host(shell: Shell, host: Host): return shell.exec(f"ping {host.config.address} -c 1", options).return_code +# TODO: Move to ClusterStateController @reporter.step_deco("Wait for storage nodes returned to cluster") def wait_all_storage_nodes_returned(shell: Shell, cluster: Cluster) -> None: - for node in cluster.services(StorageNode): - with reporter.step(f"Run health check for storage at '{node}'"): - wait_for_host_online(shell, node) - wait_for_node_online(node) + nodes = cluster.services(StorageNode) + parallel(_wait_for_storage_node, nodes, shell=shell) + + +@reporter.step_deco("Run health check for storage at '{node}'") +def _wait_for_storage_node(node: StorageNode, shell: Shell) -> None: + wait_for_host_online(shell, node) + wait_for_node_online(node) @retry(max_attempts=60, sleep_interval=5, expected_result=0) @@ -64,10 +70,17 @@ def wait_for_node_online(node: StorageNode): except Exception as err: logger.warning(f"Node healthcheck fails with error {err}") return False + finally: + gather_socket_info(node) return health_check.health_status == "READY" and health_check.network_status == "ONLINE" +@reporter.step_deco("Gather socket info for {node}") +def gather_socket_info(node: StorageNode): + node.host.get_shell().exec("ss -tuln | grep 8080", CommandOptions(check=False)) + + @reporter.step_deco("Check and return status of given service") def service_status(service: str, shell: Shell) -> str: return shell.exec(f"sudo systemctl is-active {service}").stdout.rstrip() @@ -139,9 +152,7 @@ def multiple_restart( service_name = node.service(service_type).name for _ in range(count): node.host.restart_service(service_name) - logger.info( - f"Restart {service_systemctl_name}; sleep {sleep_interval} seconds and continue" - ) + logger.info(f"Restart {service_systemctl_name}; sleep {sleep_interval} seconds and continue") sleep(sleep_interval) @@ -164,9 +175,7 @@ def check_services_status(service_list: list[str], expected_status: str, shell: @reporter.step_deco("Wait for active status of passed service") @wait_for_success(60, 5) -def wait_service_in_desired_state( - service: str, shell: Shell, expected_status: Optional[str] = "active" -): +def wait_service_in_desired_state(service: str, shell: Shell, expected_status: Optional[str] = "active"): real_status = service_status(service=service, shell=shell) assert ( expected_status == real_status From 253bb3b1d81eb66b3e145bd777e08c99e3377ee5 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 22 Nov 2023 17:10:09 +0300 Subject: [PATCH 084/274] [126] small healthcheck and stop start hosts rework Signed-off-by: Andrey Berezin --- .../healthcheck/basic_healthcheck.py | 67 ++++++++++++++--- src/frostfs_testlib/healthcheck/interfaces.py | 10 +-- 
.../controllers/cluster_state_controller.py | 74 +++++++++++++------ src/frostfs_testlib/utils/failover_utils.py | 66 +---------------- 4 files changed, 112 insertions(+), 105 deletions(-) diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py index 9c1d151..6f21534 100644 --- a/src/frostfs_testlib/healthcheck/basic_healthcheck.py +++ b/src/frostfs_testlib/healthcheck/basic_healthcheck.py @@ -1,22 +1,65 @@ +from typing import Callable + from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.healthcheck.interfaces import Healthcheck from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC +from frostfs_testlib.shell import CommandOptions from frostfs_testlib.steps.node_management import storage_node_healthcheck from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.testing.test_control import wait_for_success reporter = get_reporter() class BasicHealthcheck(Healthcheck): - @reporter.step_deco("Perform healthcheck for {cluster_node}") - def perform(self, cluster_node: ClusterNode): - result = self.storage_healthcheck(cluster_node) - if result: - raise AssertionError(result) + def _perform(self, cluster_node: ClusterNode, checks: dict[Callable, dict]): + issues: list[str] = [] + for check, kwargs in checks.items(): + issue = check(cluster_node, **kwargs) + if issue: + issues.append(issue) + + assert not issues, "Issues found:\n" + "\n".join(issues) + + @wait_for_success(900, 30) + def full_healthcheck(self, cluster_node: ClusterNode): + checks = { + self.storage_healthcheck: {}, + self._tree_healthcheck: {}, + } + + with reporter.step(f"Perform full healthcheck for {cluster_node}"): + self._perform(cluster_node, checks) + + @wait_for_success(900, 30) + def startup_healthcheck(self, cluster_node: ClusterNode): + checks = { + self.storage_healthcheck: {}, + self._tree_healthcheck: {}, + } + + with reporter.step(f"Perform startup healthcheck on {cluster_node}"): + self._perform(cluster_node, checks) + + @wait_for_success(900, 30) + def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: + checks = { + self._storage_healthcheck: {}, + } + + with reporter.step(f"Perform storage healthcheck on {cluster_node}"): + self._perform(cluster_node, checks) + + @reporter.step_deco("Storage healthcheck on {cluster_node}") + def _storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: + result = storage_node_healthcheck(cluster_node.storage_node) + self._gather_socket_info(cluster_node) + if result.health_status != "READY" or result.network_status != "ONLINE": + return f"Node {cluster_node} is not healthy. Health={result.health_status}. Network={result.network_status}" @reporter.step_deco("Tree healthcheck on {cluster_node}") - def tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: + def _tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: host = cluster_node.host service_config = host.get_service_config(cluster_node.storage_node.name) wallet_path = service_config.attributes["wallet_path"] @@ -34,10 +77,10 @@ class BasicHealthcheck(Healthcheck): ) result = remote_cli.tree.healthcheck(rpc_endpoint="127.0.0.1:8080") if result.return_code != 0: - return f"Error during tree healthcheck (rc={result.return_code}): {result.stdout}. \n Stderr: {result.stderr}" + return ( + f"Error during tree healthcheck (rc={result.return_code}): {result.stdout}. 
\n Stderr: {result.stderr}" + ) - @reporter.step_deco("Storage healthcheck on {cluster_node}") - def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: - result = storage_node_healthcheck(cluster_node.storage_node) - if result.health_status != "READY" or result.network_status != "ONLINE": - return f"Node {cluster_node} is not healthy. Health={result.health_status}. Network={result.network_status}" + @reporter.step_deco("Gather socket info for {cluster_node}") + def _gather_socket_info(self, cluster_node: ClusterNode): + cluster_node.host.get_shell().exec("ss -tuln | grep 8080", CommandOptions(check=False)) diff --git a/src/frostfs_testlib/healthcheck/interfaces.py b/src/frostfs_testlib/healthcheck/interfaces.py index a036a82..83fa021 100644 --- a/src/frostfs_testlib/healthcheck/interfaces.py +++ b/src/frostfs_testlib/healthcheck/interfaces.py @@ -5,13 +5,13 @@ from frostfs_testlib.storage.cluster import ClusterNode class Healthcheck(ABC): @abstractmethod - def perform(self, cluster_node: ClusterNode): - """Perform healthcheck on the target cluster node""" + def full_healthcheck(self, cluster_node: ClusterNode): + """Perform full healthcheck on the target cluster node""" @abstractmethod - def tree_healthcheck(self, cluster_node: ClusterNode): - """Check tree sync status on target cluster node""" + def startup_healthcheck(self, cluster_node: ClusterNode): + """Perform healthcheck required on startup of target cluster node""" @abstractmethod def storage_healthcheck(self, cluster_node: ClusterNode): - """Perform storage node healthcheck on target cluster node""" + """Perform storage service healthcheck on target cluster node""" diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 000bdd8..7020671 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,4 +1,5 @@ import datetime +import logging import time from typing import TypeVar @@ -6,6 +7,7 @@ import frostfs_testlib.resources.optionals as optionals from frostfs_testlib.cli import FrostfsAdm, FrostfsCli from frostfs_testlib.cli.netmap_parser import NetmapParser from frostfs_testlib.healthcheck.interfaces import Healthcheck +from frostfs_testlib.hosting.interfaces import HostStatus from frostfs_testlib.plugins import load_all from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC @@ -16,16 +18,11 @@ from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, Storag from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.testing import parallel -from frostfs_testlib.testing.test_control import run_optionally, wait_for_success +from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success from frostfs_testlib.utils.datetime_utils import parse_time -from frostfs_testlib.utils.failover_utils import ( - wait_all_storage_nodes_returned, - wait_for_host_offline, - wait_for_host_online, - wait_for_node_online, -) reporter = get_reporter() +logger = logging.getLogger("NeoLogger") if_up_down_helper = IfUpDownHelper() @@ -88,7 +85,7 @@ class ClusterStateController: self.stopped_nodes.append(node) with reporter.step(f"Stop host {node.host.config.address}"): 
             node.host.stop_host(mode=mode)
-            wait_for_host_offline(self.shell, node.storage_node)
+            self._wait_for_host_offline(node)

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Shutdown whole cluster")
@@ -105,18 +102,17 @@ class ClusterStateController:
             node.host.stop_host(mode=mode)

         for node in nodes:
-            wait_for_host_offline(self.shell, node.storage_node)
+            self._wait_for_host_offline(node)

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Start host of node {node}")
-    def start_node_host(self, node: ClusterNode, tree_healthcheck: bool = True):
+    def start_node_host(self, node: ClusterNode, startup_healthcheck: bool = True):
         with reporter.step(f"Start host {node.host.config.address}"):
             node.host.start_host()
-            wait_for_host_online(self.shell, node.storage_node)
+            self._wait_for_host_online(node)
             self.stopped_nodes.remove(node)
-            wait_for_node_online(node.storage_node)
-            if tree_healthcheck:
-                self.wait_tree_healthcheck()
+            if startup_healthcheck:
+                self.wait_startup_healthcheck()

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Start stopped hosts")
@@ -131,6 +127,9 @@ class ClusterStateController:
             self.stopped_services.difference_update(self._get_stopped_by_node(node))

         self.stopped_nodes = []
+        with reporter.step("Wait for all nodes to go online"):
+            parallel(self._wait_for_host_online, self.cluster.cluster_nodes)
+
         self.wait_after_storage_startup()

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@@ -183,16 +182,15 @@ class ClusterStateController:
         if online_s3gates:
             parallel(self.wait_s3gate, online_s3gates)

-    @wait_for_success(600, 60)
-    def wait_tree_healthcheck(self):
+    @reporter.step_deco("Wait for cluster startup healthcheck")
+    def wait_startup_healthcheck(self):
         nodes = self.cluster.nodes(self._get_online(StorageNode))
-        parallel(self.healthcheck.tree_healthcheck, nodes)
+        parallel(self.healthcheck.startup_healthcheck, nodes)

     @reporter.step_deco("Wait for storage reconnection to the system")
     def wait_after_storage_startup(self):
-        wait_all_storage_nodes_returned(self.shell, self.cluster)
+        self.wait_startup_healthcheck()
         self.wait_s3gates()
-        self.wait_tree_healthcheck()

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Start all stopped services")
@@ -366,7 +364,7 @@ class ClusterStateController:
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Hard reboot host {node} via magic SysRq option")
-    def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True, tree_healthcheck: bool = True):
+    def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True, startup_healthcheck: bool = True):
         shell = node.host.get_shell()
         shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"')
@@ -381,10 +379,9 @@ class ClusterStateController:
         # Let the things to be settled
         # A little wait here to prevent ssh stuck during panic
         time.sleep(10)
-        wait_for_host_online(self.shell, node.storage_node)
-        wait_for_node_online(node.storage_node)
-        if tree_healthcheck:
-            self.wait_tree_healthcheck()
+        self._wait_for_host_online(node)
+        if startup_healthcheck:
+            self.wait_startup_healthcheck()

     @reporter.step_deco("Down {interface} to {nodes}")
     def down_interface(self, nodes: list[ClusterNode], interface: str):
@@ -539,3 +536,32 @@ class ClusterStateController:
                 if "mgmt" not in type:
                     interfaces.append(ip)
         return interfaces
+
+    @reporter.step_deco("Ping node")
+    def _ping_host(self, node: ClusterNode):
+        options = CommandOptions(check=False)
+        return
self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code + + @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.ONLINE) + @reporter.step_deco("Waiting for {node} to go online") + def _wait_for_host_online(self, node: ClusterNode): + try: + ping_result = self._ping_host(node) + if ping_result != 0: + return HostStatus.OFFLINE + return node.host.get_host_status() + except Exception as err: + logger.warning(f"Host ping fails with error {err}") + return HostStatus.OFFLINE + + @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.OFFLINE) + @reporter.step_deco("Waiting for {node} to go offline") + def _wait_for_host_offline(self, node: ClusterNode): + try: + ping_result = self._ping_host(node) + if ping_result == 0: + return HostStatus.ONLINE + return node.host.get_host_status() + except Exception as err: + logger.warning(f"Host ping fails with error {err}") + return HostStatus.ONLINE diff --git a/src/frostfs_testlib/utils/failover_utils.py b/src/frostfs_testlib/utils/failover_utils.py index 27cd181..d4892c4 100644 --- a/src/frostfs_testlib/utils/failover_utils.py +++ b/src/frostfs_testlib/utils/failover_utils.py @@ -3,17 +3,15 @@ from dataclasses import dataclass from time import sleep from typing import Optional -from frostfs_testlib.hosting import Host from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.common import SERVICE_MAX_STARTUP_TIME -from frostfs_testlib.shell import CommandOptions, Shell +from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import neo_go_dump_keys from frostfs_testlib.steps.node_management import storage_node_healthcheck from frostfs_testlib.steps.storage_policy import get_nodes_with_object from frostfs_testlib.storage.cluster import Cluster, ClusterNode, NodeBase, StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain -from frostfs_testlib.testing.parallel import parallel -from frostfs_testlib.testing.test_control import retry, wait_for_success +from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils.datetime_utils import parse_time reporter = get_reporter() @@ -21,66 +19,6 @@ reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Ping node") -def ping_host(shell: Shell, host: Host): - options = CommandOptions(check=False) - return shell.exec(f"ping {host.config.address} -c 1", options).return_code - - -# TODO: Move to ClusterStateController -@reporter.step_deco("Wait for storage nodes returned to cluster") -def wait_all_storage_nodes_returned(shell: Shell, cluster: Cluster) -> None: - nodes = cluster.services(StorageNode) - parallel(_wait_for_storage_node, nodes, shell=shell) - - -@reporter.step_deco("Run health check for storage at '{node}'") -def _wait_for_storage_node(node: StorageNode, shell: Shell) -> None: - wait_for_host_online(shell, node) - wait_for_node_online(node) - - -@retry(max_attempts=60, sleep_interval=5, expected_result=0) -@reporter.step_deco("Waiting for host of {node} to go online") -def wait_for_host_online(shell: Shell, node: StorageNode): - try: - # TODO: Quick solution for now, should be replaced by lib interactions - return ping_host(shell, node.host) - except Exception as err: - logger.warning(f"Host ping fails with error {err}") - return 1 - - -@retry(max_attempts=60, sleep_interval=5, expected_result=1) -@reporter.step_deco("Waiting for host of {node} to go offline") -def wait_for_host_offline(shell: Shell, node: StorageNode): 
- try: - # TODO: Quick solution for now, should be replaced by lib interactions - return ping_host(shell, node.host) - except Exception as err: - logger.warning(f"Host ping fails with error {err}") - return 0 - - -@retry(max_attempts=20, sleep_interval=30, expected_result=True) -@reporter.step_deco("Waiting for node {node} to go online") -def wait_for_node_online(node: StorageNode): - try: - health_check = storage_node_healthcheck(node) - except Exception as err: - logger.warning(f"Node healthcheck fails with error {err}") - return False - finally: - gather_socket_info(node) - - return health_check.health_status == "READY" and health_check.network_status == "ONLINE" - - -@reporter.step_deco("Gather socket info for {node}") -def gather_socket_info(node: StorageNode): - node.host.get_shell().exec("ss -tuln | grep 8080", CommandOptions(check=False)) - - @reporter.step_deco("Check and return status of given service") def service_status(service: str, shell: Shell) -> str: return shell.exec(f"sudo systemctl is-active {service}").stdout.rstrip() From f072f88673dfddcbf5064b49bbe1b123957685fb Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 22 Nov 2023 19:54:39 +0300 Subject: [PATCH 085/274] [#127] Change service registration Signed-off-by: Andrey Berezin --- pyproject.toml | 6 ++++++ src/frostfs_testlib/plugins/__init__.py | 2 +- src/frostfs_testlib/storage/__init__.py | 15 --------------- 3 files changed, 7 insertions(+), 16 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 34a37e3..7d3e5b0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -50,6 +50,12 @@ basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck" [project.entry-points."frostfs.testlib.csc_managers"] config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager" +[project.entry-points."frostfs.testlib.services"] +s = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode" +s3-gate = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate" +http-gate = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate" +morph-chain = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain" +ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing" [tool.isort] profile = "black" diff --git a/src/frostfs_testlib/plugins/__init__.py b/src/frostfs_testlib/plugins/__init__.py index 79de340..26b2441 100644 --- a/src/frostfs_testlib/plugins/__init__.py +++ b/src/frostfs_testlib/plugins/__init__.py @@ -23,7 +23,7 @@ def load_all(group: str) -> Any: """Loads all plugins using entry point specification. Args: - plugin_group: Name of plugin group. + group: Name of plugin group. Returns: Classes from specified group. 
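For context: the `load_all(group)` helper documented above is what resolves these new `pyproject.toml` entry points at runtime. A minimal sketch of that mechanism, assuming a plain `importlib.metadata` implementation on Python 3.10+ (the actual body of `load_all` is not shown in this patch, and `load_all_sketch` is an illustrative name):

    from importlib.metadata import entry_points

    def load_all_sketch(group: str) -> list:
        # Resolve every class registered under the given group, e.g. the
        # "frostfs.testlib.services" entries declared in pyproject.toml above.
        return [entry_point.load() for entry_point in entry_points(group=group)]

    # Stands in for the hardcoded register_service() calls removed in the next hunk.
    service_classes = load_all_sketch("frostfs.testlib.services")

The design benefit is that downstream plugins can register their own service classes purely through packaging metadata, without patching `frostfs_testlib.storage.__init__`.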
diff --git a/src/frostfs_testlib/storage/__init__.py b/src/frostfs_testlib/storage/__init__.py index 3562d25..cbbef84 100644 --- a/src/frostfs_testlib/storage/__init__.py +++ b/src/frostfs_testlib/storage/__init__.py @@ -1,22 +1,7 @@ -from frostfs_testlib.storage.constants import _FrostfsServicesNames -from frostfs_testlib.storage.dataclasses.frostfs_services import ( - HTTPGate, - InnerRing, - MorphChain, - S3Gate, - StorageNode, -) from frostfs_testlib.storage.service_registry import ServiceRegistry __class_registry = ServiceRegistry() -# Register default public services -__class_registry.register_service(_FrostfsServicesNames.STORAGE, StorageNode) -__class_registry.register_service(_FrostfsServicesNames.INNER_RING, InnerRing) -__class_registry.register_service(_FrostfsServicesNames.MORPH_CHAIN, MorphChain) -__class_registry.register_service(_FrostfsServicesNames.S3_GATE, S3Gate) -__class_registry.register_service(_FrostfsServicesNames.HTTP_GATE, HTTPGate) - def get_service_registry() -> ServiceRegistry: """Returns registry with registered classes related to cluster and cluster nodes. From d1ba7eb66181a7335b0e0b5cd8a1271f4b078015 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 23 Nov 2023 08:03:31 +0300 Subject: [PATCH 086/274] Change local timeout Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/s3/aws_cli_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 320d74b..59ee740 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -17,7 +17,7 @@ from frostfs_testlib.utils.cli_utils import _configure_aws_cli reporter = get_reporter() logger = logging.getLogger("NeoLogger") -command_options = CommandOptions(timeout=240) +command_options = CommandOptions(timeout=480) class AwsCliClient(S3ClientWrapper): From c17f0f6173bfc04b623dc0385c425573a59e78ef Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 24 Nov 2023 19:46:35 +0300 Subject: [PATCH 087/274] [#130] Add service healthcheck and allow to skip version check for some binaries Signed-off-by: Andrey Berezin --- .../healthcheck/basic_healthcheck.py | 23 ++++++++++- src/frostfs_testlib/healthcheck/interfaces.py | 4 ++ src/frostfs_testlib/storage/cluster.py | 20 +++++++++- src/frostfs_testlib/utils/failover_utils.py | 7 ++-- src/frostfs_testlib/utils/version_utils.py | 39 ++++++++++--------- 5 files changed, 69 insertions(+), 24 deletions(-) diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py index 6f21534..4cb3a48 100644 --- a/src/frostfs_testlib/healthcheck/basic_healthcheck.py +++ b/src/frostfs_testlib/healthcheck/basic_healthcheck.py @@ -6,8 +6,9 @@ from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC from frostfs_testlib.shell import CommandOptions from frostfs_testlib.steps.node_management import storage_node_healthcheck -from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.cluster import ClusterNode, ServiceClass from frostfs_testlib.testing.test_control import wait_for_success +from frostfs_testlib.utils.failover_utils import check_services_status reporter = get_reporter() @@ -51,6 +52,26 @@ class BasicHealthcheck(Healthcheck): with reporter.step(f"Perform storage healthcheck on {cluster_node}"): self._perform(cluster_node, checks) + @wait_for_success(120, 5) + def services_healthcheck(self, 
cluster_node: ClusterNode): + svcs_to_check = cluster_node.services + checks = { + check_services_status: { + "service_list": svcs_to_check, + "expected_status": "active", + }, + self._check_services: {"services": svcs_to_check}, + } + + with reporter.step(f"Perform service healthcheck on {cluster_node}"): + self._perform(cluster_node, checks) + + def _check_services(self, cluster_node: ClusterNode, services: list[ServiceClass]): + for svc in services: + result = svc.service_healthcheck() + if result == False: + return f"Service {svc.get_service_systemctl_name()} healthcheck failed on node {cluster_node}." + @reporter.step_deco("Storage healthcheck on {cluster_node}") def _storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: result = storage_node_healthcheck(cluster_node.storage_node) diff --git a/src/frostfs_testlib/healthcheck/interfaces.py b/src/frostfs_testlib/healthcheck/interfaces.py index 83fa021..c665b8a 100644 --- a/src/frostfs_testlib/healthcheck/interfaces.py +++ b/src/frostfs_testlib/healthcheck/interfaces.py @@ -15,3 +15,7 @@ class Healthcheck(ABC): @abstractmethod def storage_healthcheck(self, cluster_node: ClusterNode): """Perform storage service healthcheck on target cluster node""" + + @abstractmethod + def services_healthcheck(self, cluster_node: ClusterNode): + """Perform service status check on target cluster node""" diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index b8c32ca..02601ac 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -116,8 +116,24 @@ class ClusterNode: self.host, ) - def get_list_of_services(self) -> list[str]: - return [config.attributes[ConfigAttributes.SERVICE_NAME] for config in self.host.config.services] + @property + def services(self) -> list[NodeBase]: + svcs: list[NodeBase] = [] + svcs_names_on_node = [svc.name for svc in self.host.config.services] + for entry in self.class_registry._class_mapping.values(): + hosting_svc_name = entry["hosting_service_name"] + pattern = f"{hosting_svc_name}{self.id:02}" + if pattern in svcs_names_on_node: + config = self.host.get_service_config(pattern) + svcs.append( + entry["cls"]( + self.id, + config.name, + self.host, + ) + ) + + return svcs def get_all_interfaces(self) -> dict[str, str]: return self.host.config.interfaces diff --git a/src/frostfs_testlib/utils/failover_utils.py b/src/frostfs_testlib/utils/failover_utils.py index d4892c4..507168e 100644 --- a/src/frostfs_testlib/utils/failover_utils.py +++ b/src/frostfs_testlib/utils/failover_utils.py @@ -11,6 +11,7 @@ from frostfs_testlib.steps.node_management import storage_node_healthcheck from frostfs_testlib.steps.storage_policy import get_nodes_with_object from frostfs_testlib.storage.cluster import Cluster, ClusterNode, NodeBase, StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain +from frostfs_testlib.storage.dataclasses.node_base import ServiceClass from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils.datetime_utils import parse_time @@ -96,11 +97,11 @@ def multiple_restart( @reporter.step_deco("Get status of list of services and check expected status") @wait_for_success(60, 5) -def check_services_status(service_list: list[str], expected_status: str, shell: Shell): +def check_services_status(cluster_node: ClusterNode, service_list: list[ServiceClass], expected_status: str): cmd = "" for service in service_list: - cmd += f' sudo systemctl status {service} --lines=0 
| grep "Active:";' - result = shell.exec(cmd).stdout.rstrip() + cmd += f' sudo systemctl status {service.get_service_systemctl_name()} --lines=0 | grep "Active:";' + result = cluster_node.host.get_shell().exec(cmd).stdout.rstrip() statuses = list() for line in result.split("\n"): status_substring = line.split() diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index 68f8578..42bde6d 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -3,12 +3,7 @@ import re from frostfs_testlib.cli import FrostfsAdm, FrostfsCli from frostfs_testlib.hosting import Hosting -from frostfs_testlib.resources.cli import ( - FROSTFS_ADM_EXEC, - FROSTFS_AUTHMATE_EXEC, - FROSTFS_CLI_EXEC, - NEOGO_EXECUTABLE, -) +from frostfs_testlib.resources.cli import FROSTFS_ADM_EXEC, FROSTFS_AUTHMATE_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell @@ -44,36 +39,44 @@ def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: binary_path_by_name = {} # Maps binary name to executable path for service_config in host.config.services: exec_path = service_config.attributes.get("exec_path") + requires_check = service_config.attributes.get("requires_version_check", "true") if exec_path: - binary_path_by_name[service_config.name] = exec_path + binary_path_by_name[service_config.name] = { + "exec_path": exec_path, + "check": requires_check.lower() == "true", + } for cli_config in host.config.clis: - binary_path_by_name[cli_config.name] = cli_config.exec_path + requires_check = cli_config.attributes.get("requires_version_check", "true") + binary_path_by_name[cli_config.name] = { + "exec_path": cli_config.exec_path, + "check": requires_check.lower() == "true", + } shell = host.get_shell() versions_at_host = {} - for binary_name, binary_path in binary_path_by_name.items(): + for binary_name, binary in binary_path_by_name.items(): try: + binary_path = binary["exec_path"] result = shell.exec(f"{binary_path} --version") - versions_at_host[binary_name] = _parse_version(result.stdout) + versions_at_host[binary_name] = {"version": _parse_version(result.stdout), "check": binary["check"]} except Exception as exc: logger.error(f"Cannot get version for {binary_path} because of\n{exc}") - versions_at_host[binary_name] = "Unknown" + versions_at_host[binary_name] = {"version": "Unknown", "check": binary["check"]} versions_by_host[host.config.address] = versions_at_host # Consolidate versions across all hosts versions = {} for host, binary_versions in versions_by_host.items(): - for name, version in binary_versions.items(): - captured_version = versions.get(name) + for name, binary in binary_versions.items(): + captured_version = versions.get(name, {}).get("version") + version = binary["version"] if captured_version: - assert ( - captured_version == version - ), f"Binary {name} has inconsistent version on host {host}" + assert captured_version == version, f"Binary {name} has inconsistent version on host {host}" else: - versions[name] = version + versions[name] = {"version": version, "check": binary["check"]} return versions def _parse_version(version_output: str) -> str: version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE) - return version.group(1).strip() if version else "Unknown" + return version.group(1).strip() if version else version_output From 47414eb86630224f1cc9a19178307e921fad45c3 Mon Sep 17 00:00:00 2001 
From: Yaroslava Lukoyanova Date: Fri, 24 Nov 2023 16:32:26 +0300 Subject: [PATCH 088/274] Support of AWS profiles --- src/frostfs_testlib/s3/aws_cli_client.py | 103 ++++++++++++----------- src/frostfs_testlib/s3/boto3_client.py | 4 +- src/frostfs_testlib/s3/interfaces.py | 2 +- 3 files changed, 55 insertions(+), 54 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 59ee740..059e949 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -29,14 +29,15 @@ class AwsCliClient(S3ClientWrapper): s3gate_endpoint: str @reporter.step_deco("Configure S3 client (aws cli)") - def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: + def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str='default') -> None: self.s3gate_endpoint = s3gate_endpoint + self.profile = profile self.local_shell = LocalShell() try: - _configure_aws_cli("aws configure", access_key_id, secret_access_key) - self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}") + _configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key) + self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}") self.local_shell.exec( - f"aws configure set retry_mode {RETRY_MODE}", + f"aws configure set retry_mode {RETRY_MODE} --profile {profile}", ) except Exception as err: raise RuntimeError("Error while configuring AwsCliClient") from err @@ -67,7 +68,7 @@ class AwsCliClient(S3ClientWrapper): object_lock = " --no-object-lock-enabled-for-bucket" cmd = ( f"aws {self.common_flags} s3api create-bucket --bucket {bucket} " - f"{object_lock} --endpoint {self.s3gate_endpoint}" + f"{object_lock} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) if acl: cmd += f" --acl {acl}" @@ -86,20 +87,20 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("List buckets S3") def list_buckets(self) -> list[str]: - cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout buckets_json = self._to_json(output) return [bucket["Name"] for bucket in buckets_json["Buckets"]] @reporter.step_deco("Delete bucket S3") def delete_bucket(self, bucket: str) -> None: - cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd, command_options) sleep(S3_SYNC_WAIT_TIME) @reporter.step_deco("Head bucket S3") def head_bucket(self, bucket: str) -> None: - cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd) @reporter.step_deco("Put bucket versioning status") @@ -107,7 +108,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} " f"--versioning-configuration Status={status.value} " - f"--endpoint {self.s3gate_endpoint}" + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -115,7 +116,7 @@ class 
AwsCliClient(S3ClientWrapper): def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: cmd = ( f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -126,14 +127,14 @@ class AwsCliClient(S3ClientWrapper): tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]} cmd = ( f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} " - f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint}" + f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @reporter.step_deco("Get bucket tagging") def get_bucket_tagging(self, bucket: str) -> list: cmd = ( - f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" + f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -141,7 +142,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Get bucket acl") def get_bucket_acl(self, bucket: str) -> list: - cmd = f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Grants") @@ -149,7 +150,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Get bucket location") def get_bucket_location(self, bucket: str) -> dict: cmd = ( - f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" + f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -157,7 +158,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("List objects S3") def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -168,7 +169,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("List objects S3 v2") def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -181,7 +182,7 @@ class AwsCliClient(S3ClientWrapper): def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: cmd = ( f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" + f"--endpoint {self.s3gate_endpoint} 
--profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -191,7 +192,7 @@ class AwsCliClient(S3ClientWrapper): def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: cmd = ( f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -218,7 +219,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api copy-object --copy-source {copy_source} " - f"--bucket {bucket} --key {key} --endpoint {self.s3gate_endpoint}" + f"--bucket {bucket} --key {key} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) if acl: cmd += f" --acl {acl}" @@ -255,7 +256,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api put-object --bucket {bucket} --key {key} " - f"--body {filepath} --endpoint {self.s3gate_endpoint}" + f"--body {filepath} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) if metadata: cmd += " --metadata" @@ -284,7 +285,7 @@ class AwsCliClient(S3ClientWrapper): version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} " - f"{version} --endpoint {self.s3gate_endpoint}" + f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -303,7 +304,7 @@ class AwsCliClient(S3ClientWrapper): version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} " - f"{version} {file_path} --endpoint {self.s3gate_endpoint}" + f"{version} {file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) if object_range: cmd += f" --range bytes={object_range[0]}-{object_range[1]}" @@ -316,7 +317,7 @@ class AwsCliClient(S3ClientWrapper): version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} " - f"{version} --endpoint {self.s3gate_endpoint}" + f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -333,7 +334,7 @@ class AwsCliClient(S3ClientWrapper): ) -> list: cmd = ( f"aws {self.common_flags} s3api put-object-acl --bucket {bucket} --key {key} " - f" --endpoint {self.s3gate_endpoint}" + f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) if acl: cmd += f" --acl {acl}" @@ -353,7 +354,7 @@ class AwsCliClient(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> None: - cmd = f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " f" --endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" if acl: cmd += f" --acl {acl}" if grant_write: @@ -372,7 +373,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " - f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}" + f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) @@ -384,7 +385,7 @@ class 
AwsCliClient(S3ClientWrapper): version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api delete-object --bucket {bucket} " - f"--key {key} {version} --endpoint {self.s3gate_endpoint}" + f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd, command_options).stdout sleep(S3_SYNC_WAIT_TIME) @@ -411,7 +412,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " - f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}" + f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd, command_options).stdout sleep(S3_SYNC_WAIT_TIME) @@ -442,7 +443,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api get-object-attributes --bucket {bucket} " f"--key {key} {version} {parts} {part_number_str} --object-attributes {attrs} " - f"--endpoint {self.s3gate_endpoint}" + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -457,7 +458,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Get bucket policy") def get_bucket_policy(self, bucket: str) -> dict: - cmd = f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Policy") @@ -473,13 +474,13 @@ class AwsCliClient(S3ClientWrapper): dumped_policy = json.dumps(json.dumps(policy)) cmd = ( f"aws {self.common_flags} s3api put-bucket-policy --bucket {bucket} " - f"--policy {dumped_policy} --endpoint {self.s3gate_endpoint}" + f"--policy {dumped_policy} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @reporter.step_deco("Get bucket cors") def get_bucket_cors(self, bucket: str) -> dict: - cmd = f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("CORSRules") @@ -488,14 +489,14 @@ class AwsCliClient(S3ClientWrapper): def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: cmd = ( f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " - f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint}" + f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @reporter.step_deco("Delete bucket cors") def delete_bucket_cors(self, bucket: str) -> None: cmd = ( - f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" + f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -503,7 +504,7 @@ class AwsCliClient(S3ClientWrapper): def delete_bucket_tagging(self, bucket: str) -> None: cmd = ( f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " - f"--endpoint 
{self.s3gate_endpoint}" + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -519,7 +520,7 @@ class AwsCliClient(S3ClientWrapper): version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api put-object-retention --bucket {bucket} --key {key} " - f"{version} --retention '{json.dumps(retention, indent=4, sort_keys=True, default=str)}' --endpoint {self.s3gate_endpoint}" + f"{version} --retention '{json.dumps(retention, indent=4, sort_keys=True, default=str)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) if bypass_governance_retention is not None: cmd += " --bypass-governance-retention" @@ -537,7 +538,7 @@ class AwsCliClient(S3ClientWrapper): legal_hold = json.dumps({"Status": legal_hold_status}) cmd = ( f"aws {self.common_flags} s3api put-object-legal-hold --bucket {bucket} --key {key} " - f"{version} --legal-hold '{legal_hold}' --endpoint {self.s3gate_endpoint}" + f"{version} --legal-hold '{legal_hold}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -547,7 +548,7 @@ class AwsCliClient(S3ClientWrapper): tagging = {"TagSet": tags} cmd = ( f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} " - f"--tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint}" + f"--tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -556,7 +557,7 @@ class AwsCliClient(S3ClientWrapper): version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key {key} " - f"{version} --endpoint {self.s3gate_endpoint}" + f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -566,7 +567,7 @@ class AwsCliClient(S3ClientWrapper): def delete_object_tagging(self, bucket: str, key: str) -> None: cmd = ( f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " - f"--key {key} --endpoint {self.s3gate_endpoint}" + f"--key {key} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -578,7 +579,7 @@ class AwsCliClient(S3ClientWrapper): acl: Optional[str] = None, metadata: Optional[dict] = None, ) -> dict: - cmd = f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" if metadata: cmd += " --metadata" for key, value in metadata.items(): @@ -598,7 +599,7 @@ class AwsCliClient(S3ClientWrapper): ) -> dict: cmd = ( f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket} " - f"--endpoint-url {self.s3gate_endpoint} --recursive" + f"--endpoint-url {self.s3gate_endpoint} --recursive --profile {self.profile}" ) if metadata: cmd += " --metadata" @@ -613,7 +614,7 @@ class AwsCliClient(S3ClientWrapper): def create_multipart_upload(self, bucket: str, key: str) -> str: cmd = ( f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " - f"--key {key} --endpoint-url {self.s3gate_endpoint}" + f"--key {key} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -626,7 +627,7 @@ class AwsCliClient(S3ClientWrapper): def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: cmd = ( 
f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " - f"--endpoint-url {self.s3gate_endpoint}" + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -636,7 +637,7 @@ class AwsCliClient(S3ClientWrapper): def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: cmd = ( f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} " - f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}" + f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -645,7 +646,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} " f"--upload-id {upload_id} --part-number {part_num} --body {filepath} " - f"--endpoint-url {self.s3gate_endpoint}" + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) @@ -657,7 +658,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} " f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} " - f"--endpoint-url {self.s3gate_endpoint}" + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) @@ -669,7 +670,7 @@ class AwsCliClient(S3ClientWrapper): def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: cmd = ( f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " - f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}" + f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -691,7 +692,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api complete-multipart-upload --bucket {bucket} " f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} " - f"--endpoint-url {self.s3gate_endpoint}" + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -699,7 +700,7 @@ class AwsCliClient(S3ClientWrapper): def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: cmd = ( f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " - f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint}" + f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout return self._to_json(output) @@ -708,7 +709,7 @@ class AwsCliClient(S3ClientWrapper): def get_object_lock_configuration(self, bucket: str): cmd = ( f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " - f"--endpoint-url {self.s3gate_endpoint}" + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 2251efe..ba3716a 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -48,9 +48,9 @@ class 
Boto3ClientWrapper(S3ClientWrapper): @reporter.step_deco("Configure S3 client (boto3)") @report_error - def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: + def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str='default') -> None: self.boto3_client: S3Client = None - self.session = boto3.Session() + self.session = boto3.Session(profile_name=profile) self.config = Config( retries={ "max_attempts": MAX_REQUEST_ATTEMPTS, diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index 2b6be7d..dd21823 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -33,7 +33,7 @@ ACL_COPY = [ class S3ClientWrapper(HumanReadableABC): @abstractmethod - def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: + def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str) -> None: pass @abstractmethod From 39a17f36346d8bb672bb97676aef51db3c073154 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 28 Nov 2023 12:28:44 +0300 Subject: [PATCH 089/274] [#132] Add steps logger and refactor reporter usage Signed-off-by: Andrey Berezin --- src/frostfs_testlib/reporter/__init__.py | 5 ++ .../reporter/allure_handler.py | 4 +- src/frostfs_testlib/reporter/interfaces.py | 4 +- src/frostfs_testlib/reporter/reporter.py | 18 ++++-- src/frostfs_testlib/reporter/steps_logger.py | 56 ++++++++++++++++++ src/frostfs_testlib/testing/parallel.py | 2 +- src/frostfs_testlib/testing/test_control.py | 28 +++++++-- src/frostfs_testlib/utils/__init__.py | 4 ++ src/frostfs_testlib/utils/func_utils.py | 58 +++++++++++++++++++ 9 files changed, 163 insertions(+), 16 deletions(-) create mode 100644 src/frostfs_testlib/reporter/steps_logger.py create mode 100644 src/frostfs_testlib/utils/func_utils.py diff --git a/src/frostfs_testlib/reporter/__init__.py b/src/frostfs_testlib/reporter/__init__.py index 10e4146..e2c113c 100644 --- a/src/frostfs_testlib/reporter/__init__.py +++ b/src/frostfs_testlib/reporter/__init__.py @@ -1,6 +1,7 @@ from frostfs_testlib.reporter.allure_handler import AllureHandler from frostfs_testlib.reporter.interfaces import ReporterHandler from frostfs_testlib.reporter.reporter import Reporter +from frostfs_testlib.reporter.steps_logger import StepsLogger __reporter = Reporter() @@ -15,3 +16,7 @@ def get_reporter() -> Reporter: Singleton reporter instance. 
""" return __reporter + + +def step(title: str): + return __reporter.step(title) diff --git a/src/frostfs_testlib/reporter/allure_handler.py b/src/frostfs_testlib/reporter/allure_handler.py index fef815d..9089f98 100644 --- a/src/frostfs_testlib/reporter/allure_handler.py +++ b/src/frostfs_testlib/reporter/allure_handler.py @@ -1,5 +1,5 @@ import os -from contextlib import AbstractContextManager +from contextlib import AbstractContextManager, ContextDecorator from textwrap import shorten from typing import Any, Callable @@ -12,7 +12,7 @@ from frostfs_testlib.reporter.interfaces import ReporterHandler class AllureHandler(ReporterHandler): """Handler that stores test artifacts in Allure report.""" - def step(self, name: str) -> AbstractContextManager: + def step(self, name: str) -> AbstractContextManager | ContextDecorator: name = shorten(name, width=140, placeholder="...") return allure.step(name) diff --git a/src/frostfs_testlib/reporter/interfaces.py b/src/frostfs_testlib/reporter/interfaces.py index b47a3fb..4e24feb 100644 --- a/src/frostfs_testlib/reporter/interfaces.py +++ b/src/frostfs_testlib/reporter/interfaces.py @@ -1,5 +1,5 @@ from abc import ABC, abstractmethod -from contextlib import AbstractContextManager +from contextlib import AbstractContextManager, ContextDecorator from typing import Any, Callable @@ -7,7 +7,7 @@ class ReporterHandler(ABC): """Interface of handler that stores test artifacts in some reporting tool.""" @abstractmethod - def step(self, name: str) -> AbstractContextManager: + def step(self, name: str) -> AbstractContextManager | ContextDecorator: """Register a new step in test execution. Args: diff --git a/src/frostfs_testlib/reporter/reporter.py b/src/frostfs_testlib/reporter/reporter.py index d1c75f5..2d1a43e 100644 --- a/src/frostfs_testlib/reporter/reporter.py +++ b/src/frostfs_testlib/reporter/reporter.py @@ -5,6 +5,7 @@ from typing import Any, Callable, Optional from frostfs_testlib.plugins import load_plugin from frostfs_testlib.reporter.interfaces import ReporterHandler +from frostfs_testlib.utils.func_utils import format_by_args @contextmanager @@ -63,7 +64,8 @@ class Reporter: def wrapper(*a, **kw): resulting_func = func for handler in self.handlers: - decorator = handler.step_decorator(name) + parsed_name = format_by_args(func, name, *a, **kw) + decorator = handler.step_decorator(parsed_name) resulting_func = decorator(resulting_func) return resulting_func(*a, **kw) @@ -81,11 +83,11 @@ class Reporter: Returns: Step context. """ - if not self.handlers: - return _empty_step() - step_contexts = [handler.step(name) for handler in self.handlers] - return AggregateContextManager(step_contexts) + if not step_contexts: + step_contexts = [_empty_step()] + decorated_wrapper = self.step_deco(name) + return AggregateContextManager(step_contexts, decorated_wrapper) def attach(self, content: Any, file_name: str) -> None: """Attach specified content with given file name to the test report. 
@@ -104,9 +106,10 @@ class AggregateContextManager(AbstractContextManager): contexts: list[AbstractContextManager] - def __init__(self, contexts: list[AbstractContextManager]) -> None: + def __init__(self, contexts: list[AbstractContextManager], decorated_wrapper: Callable) -> None: super().__init__() self.contexts = contexts + self.wrapper = decorated_wrapper def __enter__(self): for context in self.contexts: @@ -127,3 +130,6 @@ class AggregateContextManager(AbstractContextManager): # If all context agreed to suppress exception, then suppress it; # otherwise return None to reraise return True if all(suppress_decisions) else None + + def __call__(self, *args: Any, **kwds: Any) -> Any: + return self.wrapper(*args, **kwds) diff --git a/src/frostfs_testlib/reporter/steps_logger.py b/src/frostfs_testlib/reporter/steps_logger.py new file mode 100644 index 0000000..4cdfb3d --- /dev/null +++ b/src/frostfs_testlib/reporter/steps_logger.py @@ -0,0 +1,56 @@ +import logging +import threading +from contextlib import AbstractContextManager, ContextDecorator +from functools import wraps +from types import TracebackType +from typing import Any, Callable + +from frostfs_testlib.reporter.interfaces import ReporterHandler + + +class StepsLogger(ReporterHandler): + """Handler that prints steps to log.""" + + def step(self, name: str) -> AbstractContextManager | ContextDecorator: + return StepLoggerContext(name) + + def step_decorator(self, name: str) -> Callable: + return StepLoggerContext(name) + + def attach(self, body: Any, file_name: str) -> None: + pass + + +class StepLoggerContext(AbstractContextManager): + INDENT = {} + + def __init__(self, title: str): + self.title = title + self.logger = logging.getLogger("NeoLogger") + self.thread = threading.get_ident() + if self.thread not in StepLoggerContext.INDENT: + StepLoggerContext.INDENT[self.thread] = 1 + + def __enter__(self) -> Any: + indent = ">" * StepLoggerContext.INDENT[self.thread] + self.logger.info(f"[{self.thread}] {indent} {self.title}") + StepLoggerContext.INDENT[self.thread] += 1 + + def __exit__( + self, + __exc_type: type[BaseException] | None, + __exc_value: BaseException | None, + __traceback: TracebackType | None, + ) -> bool | None: + + StepLoggerContext.INDENT[self.thread] -= 1 + indent = "<" * StepLoggerContext.INDENT[self.thread] + self.logger.info(f"[{self.thread}] {indent} {self.title}") + + def __call__(self, func): + @wraps(func) + def impl(*a, **kw): + with self: + return func(*a, **kw) + + return impl diff --git a/src/frostfs_testlib/testing/parallel.py b/src/frostfs_testlib/testing/parallel.py index ebddd38..1c30cec 100644 --- a/src/frostfs_testlib/testing/parallel.py +++ b/src/frostfs_testlib/testing/parallel.py @@ -42,7 +42,7 @@ def parallel( exceptions = [future.exception() for future in futures if future.exception()] if exceptions: message = "\n".join([str(e) for e in exceptions]) - raise RuntimeError(f"The following exceptions occured during parallel run:\n {message}") + raise RuntimeError(f"The following exceptions occured during parallel run:\n{message}") return futures diff --git a/src/frostfs_testlib/testing/test_control.py b/src/frostfs_testlib/testing/test_control.py index ed74f6a..4fa6390 100644 --- a/src/frostfs_testlib/testing/test_control.py +++ b/src/frostfs_testlib/testing/test_control.py @@ -7,6 +7,9 @@ from typing import Any from _pytest.outcomes import Failed from pytest import fail +from frostfs_testlib import reporter +from frostfs_testlib.utils.func_utils import format_by_args + logger = 
logging.getLogger("NeoLogger") # TODO: we may consider deprecating some methods here and use tenacity instead @@ -50,7 +53,7 @@ class expect_not_raises: return impl -def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = None): +def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = None, title: str = None): """ Decorator to wait for some conditions/functions to pass successfully. This is useful if you don't know exact time when something should pass successfully and do not @@ -62,8 +65,7 @@ def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = Non assert max_attempts >= 1, "Cannot apply retry decorator with max_attempts < 1" def wrapper(func): - @wraps(func) - def impl(*a, **kw): + def call(func, *a, **kw): last_exception = None for _ in range(max_attempts): try: @@ -84,6 +86,14 @@ def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = Non if last_exception is not None: raise last_exception + @wraps(func) + def impl(*a, **kw): + if title is not None: + with reporter.step(format_by_args(func, title, *a, **kw)): + return call(func, *a, **kw) + + return call(func, *a, **kw) + return impl return wrapper @@ -124,6 +134,7 @@ def wait_for_success( expected_result: Any = None, fail_testcase: bool = False, fail_message: str = "", + title: str = None, ): """ Decorator to wait for some conditions/functions to pass successfully. @@ -134,8 +145,7 @@ def wait_for_success( """ def wrapper(func): - @wraps(func) - def impl(*a, **kw): + def call(func, *a, **kw): start = int(round(time())) last_exception = None while start + max_wait_time >= int(round(time())): @@ -160,6 +170,14 @@ def wait_for_success( if last_exception is not None: raise last_exception + @wraps(func) + def impl(*a, **kw): + if title is not None: + with reporter.step(format_by_args(func, title, *a, **kw)): + return call(func, *a, **kw) + + return call(func, *a, **kw) + return impl return wrapper diff --git a/src/frostfs_testlib/utils/__init__.py b/src/frostfs_testlib/utils/__init__.py index fbc4a8f..4acc5b1 100644 --- a/src/frostfs_testlib/utils/__init__.py +++ b/src/frostfs_testlib/utils/__init__.py @@ -1,3 +1,7 @@ +""" +Idea of utils is to have small utilitary functions which are not dependent of anything. +""" + import frostfs_testlib.utils.converting_utils import frostfs_testlib.utils.datetime_utils import frostfs_testlib.utils.json_utils diff --git a/src/frostfs_testlib/utils/func_utils.py b/src/frostfs_testlib/utils/func_utils.py new file mode 100644 index 0000000..0e22d4a --- /dev/null +++ b/src/frostfs_testlib/utils/func_utils.py @@ -0,0 +1,58 @@ +import collections +import inspect +import sys +from typing import Callable + + +def format_by_args(__func: Callable, __title: str, *a, **kw) -> str: + params = _func_parameters(__func, *a, **kw) + args = list(map(lambda x: _represent(x), a)) + + return __title.format(*args, **params) + + +# These 2 functions are copied from allure_commons._allure +# Duplicate it here in order to be independent of allure and make some adjustments. 
+def _represent(item): + if isinstance(item, str): + return item + elif isinstance(item, (bytes, bytearray)): + return repr(type(item)) + else: + return repr(item) + + +def _func_parameters(func, *args, **kwargs): + parameters = {} + arg_spec = inspect.getfullargspec(func) + arg_order = list(arg_spec.args) + args_dict = dict(zip(arg_spec.args, args)) + + if arg_spec.defaults: + kwargs_defaults_dict = dict(zip(arg_spec.args[-len(arg_spec.defaults) :], arg_spec.defaults)) + parameters.update(kwargs_defaults_dict) + + if arg_spec.varargs: + arg_order.append(arg_spec.varargs) + varargs = args[len(arg_spec.args) :] + parameters.update({arg_spec.varargs: varargs} if varargs else {}) + + if arg_spec.args and arg_spec.args[0] in ["cls", "self"]: + args_dict.pop(arg_spec.args[0], None) + + if kwargs: + if sys.version_info < (3, 7): + # Sort alphabetically as old python versions does + # not preserve call order for kwargs. + arg_order.extend(sorted(list(kwargs.keys()))) + else: + # Keep py3.7 behaviour to preserve kwargs order + arg_order.extend(list(kwargs.keys())) + parameters.update(kwargs) + + parameters.update(args_dict) + + items = parameters.items() + sorted_items = sorted(map(lambda kv: (kv[0], _represent(kv[1])), items), key=lambda x: arg_order.index(x[0])) + + return collections.OrderedDict(sorted_items) From dc6b0e407fd8e65bfab85c9ce52770887de9cc20 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 29 Nov 2023 15:27:17 +0300 Subject: [PATCH 090/274] [#133] Change reporter usage Signed-off-by: Andrey Berezin --- .../healthcheck/basic_healthcheck.py | 30 ++-- src/frostfs_testlib/load/k6.py | 7 +- src/frostfs_testlib/load/load_verifiers.py | 23 +-- src/frostfs_testlib/load/runners.py | 32 ++-- .../processes/remote_process.py | 62 +++---- src/frostfs_testlib/reporter/__init__.py | 6 + .../reporter/allure_handler.py | 7 +- src/frostfs_testlib/s3/aws_cli_client.py | 151 ++++++++++-------- src/frostfs_testlib/s3/boto3_client.py | 145 ++++++++--------- src/frostfs_testlib/shell/local_shell.py | 3 +- src/frostfs_testlib/shell/ssh_shell.py | 3 +- src/frostfs_testlib/steps/acl.py | 19 +-- src/frostfs_testlib/steps/cli/container.py | 35 ++-- src/frostfs_testlib/steps/cli/object.py | 41 +++-- .../steps/complex_object_actions.py | 7 +- src/frostfs_testlib/steps/epoch.py | 17 +- src/frostfs_testlib/steps/http/http_gate.py | 58 +++---- src/frostfs_testlib/steps/network.py | 30 ++-- src/frostfs_testlib/steps/node_management.py | 61 +++---- src/frostfs_testlib/steps/payment_neogo.py | 24 ++- src/frostfs_testlib/steps/s3/s3_helper.py | 86 +++------- src/frostfs_testlib/steps/session_token.py | 35 ++-- src/frostfs_testlib/steps/storage_object.py | 9 +- src/frostfs_testlib/steps/storage_policy.py | 37 ++--- src/frostfs_testlib/steps/tombstone.py | 21 +-- src/frostfs_testlib/storage/cluster.py | 4 +- .../configuration/service_configuration.py | 4 +- .../controllers/background_load_controller.py | 20 ++- .../controllers/cluster_state_controller.py | 94 ++++++----- .../state_managers/config_state_manager.py | 12 +- .../storage/dataclasses/node_base.py | 4 +- .../testing/cluster_test_base.py | 6 +- src/frostfs_testlib/utils/cli_utils.py | 7 +- src/frostfs_testlib/utils/env_utils.py | 7 +- src/frostfs_testlib/utils/failover_utils.py | 32 ++-- src/frostfs_testlib/utils/file_keeper.py | 10 +- src/frostfs_testlib/utils/file_utils.py | 7 +- 37 files changed, 478 insertions(+), 678 deletions(-) diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py 
b/src/frostfs_testlib/healthcheck/basic_healthcheck.py
index 4cb3a48..0443e28 100644 --- a/src/frostfs_testlib/healthcheck/basic_healthcheck.py +++ b/src/frostfs_testlib/healthcheck/basic_healthcheck.py @@ -1,8 +1,8 @@ from typing import Callable +from frostfs_testlib import reporter from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.healthcheck.interfaces import Healthcheck -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC from frostfs_testlib.shell import CommandOptions from frostfs_testlib.steps.node_management import storage_node_healthcheck @@ -10,8 +10,6 @@ from frostfs_testlib.storage.cluster import ClusterNode, ServiceClass from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils.failover_utils import check_services_status -reporter = get_reporter() - class BasicHealthcheck(Healthcheck): def _perform(self, cluster_node: ClusterNode, checks: dict[Callable, dict]): @@ -23,36 +21,33 @@ class BasicHealthcheck(Healthcheck): assert not issues, "Issues found:\n" + "\n".join(issues) - @wait_for_success(900, 30) + @wait_for_success(900, 30, title="Wait for full healthcheck for {cluster_node}") def full_healthcheck(self, cluster_node: ClusterNode): checks = { self.storage_healthcheck: {}, self._tree_healthcheck: {}, } - with reporter.step(f"Perform full healthcheck for {cluster_node}"): - self._perform(cluster_node, checks) + self._perform(cluster_node, checks) - @wait_for_success(900, 30) + @wait_for_success(900, 30, title="Wait for startup healthcheck on {cluster_node}") def startup_healthcheck(self, cluster_node: ClusterNode): checks = { self.storage_healthcheck: {}, self._tree_healthcheck: {}, } - with reporter.step(f"Perform startup healthcheck on {cluster_node}"): - self._perform(cluster_node, checks) + self._perform(cluster_node, checks) - @wait_for_success(900, 30) + @wait_for_success(900, 30, title="Wait for storage healthcheck on {cluster_node}") def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: checks = { self._storage_healthcheck: {}, } - with reporter.step(f"Perform storage healthcheck on {cluster_node}"): - self._perform(cluster_node, checks) + self._perform(cluster_node, checks) - @wait_for_success(120, 5) + @wait_for_success(120, 5, title="Wait for service healthcheck on {cluster_node}") def services_healthcheck(self, cluster_node: ClusterNode): svcs_to_check = cluster_node.services checks = { @@ -63,8 +58,7 @@ class BasicHealthcheck(Healthcheck): self._check_services: {"services": svcs_to_check}, } - with reporter.step(f"Perform service healthcheck on {cluster_node}"): - self._perform(cluster_node, checks) + self._perform(cluster_node, checks) def _check_services(self, cluster_node: ClusterNode, services: list[ServiceClass]): for svc in services: @@ -72,14 +66,14 @@ class BasicHealthcheck(Healthcheck): if result == False: return f"Service {svc.get_service_systemctl_name()} healthcheck failed on node {cluster_node}." - @reporter.step_deco("Storage healthcheck on {cluster_node}") + @reporter.step("Storage healthcheck on {cluster_node}") def _storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: result = storage_node_healthcheck(cluster_node.storage_node) self._gather_socket_info(cluster_node) if result.health_status != "READY" or result.network_status != "ONLINE": return f"Node {cluster_node} is not healthy. Health={result.health_status}. 
Network={result.network_status}" - @reporter.step_deco("Tree healthcheck on {cluster_node}") + @reporter.step("Tree healthcheck on {cluster_node}") def _tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: host = cluster_node.host service_config = host.get_service_config(cluster_node.storage_node.name) @@ -102,6 +96,6 @@ class BasicHealthcheck(Healthcheck): f"Error during tree healthcheck (rc={result.return_code}): {result.stdout}. \n Stderr: {result.stderr}" ) - @reporter.step_deco("Gather socket info for {cluster_node}") + @reporter.step("Gather socket info for {cluster_node}") def _gather_socket_info(self, cluster_node: ClusterNode): cluster_node.host.get_shell().exec("ss -tuln | grep 8080", CommandOptions(check=False)) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index 3dedd53..92da8e0 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -8,10 +8,10 @@ from time import sleep from typing import Any from urllib.parse import urlparse +from frostfs_testlib import reporter from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType from frostfs_testlib.processes.remote_process import RemoteProcess -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.common import STORAGE_USER_NAME from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD from frostfs_testlib.shell import Shell @@ -21,7 +21,6 @@ from frostfs_testlib.testing.test_control import wait_for_success EXIT_RESULT_CODE = 0 logger = logging.getLogger("NeoLogger") -reporter = get_reporter() @dataclass @@ -102,7 +101,7 @@ class K6: self.preset_output = result.stdout.strip("\n") return self.preset_output - @reporter.step_deco("Generate K6 command") + @reporter.step("Generate K6 command") def _generate_env_variables(self) -> str: env_vars = self.load_params.get_env_vars() @@ -216,7 +215,7 @@ class K6: return self._k6_process.running() return False - @reporter.step_deco("Wait until K6 process end") + @reporter.step("Wait until K6 process end") @wait_for_success(K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout") def _wait_until_process_end(self): return self._k6_process.running() diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py index b691b02..fe39862 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -1,11 +1,6 @@ -import logging - +from frostfs_testlib import reporter from frostfs_testlib.load.load_config import LoadParams, LoadScenario from frostfs_testlib.load.load_metrics import get_metrics_object -from frostfs_testlib.reporter import get_reporter - -reporter = get_reporter() -logger = logging.getLogger("NeoLogger") class LoadVerifier: @@ -49,19 +44,11 @@ class LoadVerifier: if deleters and not delete_operations: issues.append(f"No any delete operation was performed") - if ( - write_operations - and writers - and write_errors / write_operations * 100 > self.load_params.error_threshold - ): + if write_operations and writers and write_errors / write_operations * 100 > self.load_params.error_threshold: issues.append( f"Write error rate is greater than threshold: {write_errors / write_operations * 100} > {self.load_params.error_threshold}" ) - if ( - read_operations - and readers - and read_errors / read_operations * 100 > self.load_params.error_threshold - ): 
+ if read_operations and readers and read_errors / read_operations * 100 > self.load_params.error_threshold: issues.append( f"Read error rate is greater than threshold: {read_errors / read_operations * 100} > {self.load_params.error_threshold}" ) @@ -89,9 +76,7 @@ class LoadVerifier: ) return verify_issues - def _collect_verify_issues_on_process( - self, label, load_summary, verification_summary - ) -> list[str]: + def _collect_verify_issues_on_process(self, label, load_summary, verification_summary) -> list[str]: issues = [] load_metrics = get_metrics_object(self.load_params.scenario, load_summary) diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index ea5a374..f5284d8 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -9,13 +9,13 @@ from urllib.parse import urlparse import yaml +from frostfs_testlib import reporter from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner from frostfs_testlib.load.k6 import K6 from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources import optionals from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC from frostfs_testlib.resources.common import STORAGE_USER_NAME @@ -31,17 +31,15 @@ from frostfs_testlib.testing.test_control import retry from frostfs_testlib.utils import datetime_utils from frostfs_testlib.utils.file_keeper import FileKeeper -reporter = get_reporter() - class RunnerBase(ScenarioRunner): k6_instances: list[K6] - @reporter.step_deco("Run preset on loaders") + @reporter.step("Run preset on loaders") def preset(self): parallel([k6.preset for k6 in self.k6_instances]) - @reporter.step_deco("Wait until load finish") + @reporter.step("Wait until load finish") def wait_until_finish(self, soft_timeout: int = 0): parallel([k6.wait_until_finished for k6 in self.k6_instances], soft_timeout=soft_timeout) @@ -70,7 +68,7 @@ class DefaultRunner(RunnerBase): self.loaders_wallet = loaders_wallet @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Preparation steps") + @reporter.step("Preparation steps") def prepare( self, load_params: LoadParams, @@ -127,7 +125,7 @@ class DefaultRunner(RunnerBase): ] shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input)) - @reporter.step_deco("Init k6 instances") + @reporter.step("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): self.k6_instances = [] cycled_loaders = itertools.cycle(self.loaders) @@ -271,7 +269,7 @@ class LocalRunner(RunnerBase): self.nodes_under_load = nodes_under_load @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Preparation steps") + @reporter.step("Preparation steps") def prepare( self, load_params: LoadParams, @@ -298,7 +296,7 @@ class LocalRunner(RunnerBase): return True - @reporter.step_deco("Prepare node {cluster_node}") + @reporter.step("Prepare node {cluster_node}") def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams): shell = cluster_node.host.get_shell() @@ -323,7 +321,7 @@ class LocalRunner(RunnerBase): shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}') shell.exec(f"sudo 
chmod -R 777 {self.wallet.config_path}") - @reporter.step_deco("Init k6 instances") + @reporter.step("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): self.k6_instances = [] futures = parallel( @@ -369,12 +367,12 @@ class LocalRunner(RunnerBase): with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"): time.sleep(wait_after_start_time) - @reporter.step_deco("Restore passwd on {cluster_node}") + @reporter.step("Restore passwd on {cluster_node}") def restore_passwd_on_node(self, cluster_node: ClusterNode): shell = cluster_node.host.get_shell() shell.exec("sudo chattr -i /etc/passwd") - @reporter.step_deco("Lock passwd on {cluster_node}") + @reporter.step("Lock passwd on {cluster_node}") def lock_passwd_on_node(self, cluster_node: ClusterNode): shell = cluster_node.host.get_shell() shell.exec("sudo chattr +i /etc/passwd") @@ -400,19 +398,19 @@ class S3LocalRunner(LocalRunner): endpoints: list[str] k6_dir: str - @reporter.step_deco("Run preset on loaders") + @reporter.step("Run preset on loaders") def preset(self): LocalRunner.preset(self) with reporter.step(f"Resolve containers in preset"): parallel(self._resolve_containers_in_preset, self.k6_instances) - @reporter.step_deco("Resolve containers in preset") + @reporter.step("Resolve containers in preset") def _resolve_containers_in_preset(self, k6_instance: K6): k6_instance.shell.exec( f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}" ) - @reporter.step_deco("Init k6 instances") + @reporter.step("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): self.k6_instances = [] futures = parallel( @@ -448,7 +446,7 @@ class S3LocalRunner(LocalRunner): ) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Preparation steps") + @reporter.step("Preparation steps") def prepare( self, load_params: LoadParams, @@ -464,7 +462,7 @@ class S3LocalRunner(LocalRunner): parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, s3_public_keys, grpc_peer) - @reporter.step_deco("Prepare node {cluster_node}") + @reporter.step("Prepare node {cluster_node}") def prepare_node( self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, s3_public_keys: list[str], grpc_peer: str ): diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py index d92d77a..1252b97 100644 --- a/src/frostfs_testlib/processes/remote_process.py +++ b/src/frostfs_testlib/processes/remote_process.py @@ -8,18 +8,14 @@ from tenacity import retry from tenacity.stop import stop_after_attempt from tenacity.wait import wait_fixed -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.shell import Shell from frostfs_testlib.shell.command_inspectors import SuInspector from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions -reporter = get_reporter() - class RemoteProcess: - def __init__( - self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector] - ): + def __init__(self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector]): self.process_dir = process_dir self.cmd = cmd self.stdout_last_line_number = 0 @@ -32,10 +28,8 @@ class RemoteProcess: self.cmd_inspectors: 
list[CommandInspector] = [cmd_inspector] if cmd_inspector else [] @classmethod - @reporter.step_deco("Create remote process") - def create( - cls, command: str, shell: Shell, working_dir: str = "/tmp", user: Optional[str] = None - ) -> RemoteProcess: + @reporter.step("Create remote process") + def create(cls, command: str, shell: Shell, working_dir: str = "/tmp", user: Optional[str] = None) -> RemoteProcess: """ Create a process on a remote host. @@ -68,7 +62,7 @@ class RemoteProcess: remote_process.pid = remote_process._get_pid() return remote_process - @reporter.step_deco("Get process stdout") + @reporter.step("Get process stdout") def stdout(self, full: bool = False) -> str: """ Method to get process stdout, either fresh info or full. @@ -100,7 +94,7 @@ class RemoteProcess: return resulted_stdout return "" - @reporter.step_deco("Get process stderr") + @reporter.step("Get process stderr") def stderr(self, full: bool = False) -> str: """ Method to get process stderr, either fresh info or full. @@ -131,7 +125,7 @@ class RemoteProcess: return resulted_stderr return "" - @reporter.step_deco("Get process rc") + @reporter.step("Get process rc") def rc(self) -> Optional[int]: if self.proc_rc is not None: return self.proc_rc @@ -148,11 +142,11 @@ class RemoteProcess: self.proc_rc = int(terminal.stdout) return self.proc_rc - @reporter.step_deco("Check if process is running") + @reporter.step("Check if process is running") def running(self) -> bool: return self.rc() is None - @reporter.step_deco("Send signal to process") + @reporter.step("Send signal to process") def send_signal(self, signal: int) -> None: kill_res = self.shell.exec( f"kill -{signal} {self.pid}", @@ -161,27 +155,23 @@ class RemoteProcess: if "No such process" in kill_res.stderr: return if kill_res.return_code: - raise AssertionError( - f"Signal {signal} not sent. Return code of kill: {kill_res.return_code}" - ) + raise AssertionError(f"Signal {signal} not sent. 
Return code of kill: {kill_res.return_code}") - @reporter.step_deco("Stop process") + @reporter.step("Stop process") def stop(self) -> None: self.send_signal(15) - @reporter.step_deco("Kill process") + @reporter.step("Kill process") def kill(self) -> None: self.send_signal(9) - @reporter.step_deco("Clear process directory") + @reporter.step("Clear process directory") def clear(self) -> None: if self.process_dir == "/": raise AssertionError(f"Invalid path to delete: {self.process_dir}") - self.shell.exec( - f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors) - ) + self.shell.exec(f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) - @reporter.step_deco("Start remote process") + @reporter.step("Start remote process") def _start_process(self) -> None: self.shell.exec( f"nohup {self.process_dir}/command.sh None: - self.shell.exec( - f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors) - ) - self.shell.exec( - f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors) - ) - terminal = self.shell.exec( - f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors) - ) + self.shell.exec(f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) + self.shell.exec(f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) + terminal = self.shell.exec(f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) self.process_dir = terminal.stdout.strip() - @reporter.step_deco("Get pid") + @reporter.step("Get pid") @retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True) def _get_pid(self) -> str: - terminal = self.shell.exec( - f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors) - ) + terminal = self.shell.exec(f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors)) assert terminal.stdout, f"invalid pid: {terminal.stdout}" return terminal.stdout.strip() - @reporter.step_deco("Generate command script") + @reporter.step("Generate command script") def _generate_command_script(self, command: str) -> None: command = command.replace('"', '\\"').replace("\\", "\\\\") script = ( diff --git a/src/frostfs_testlib/reporter/__init__.py b/src/frostfs_testlib/reporter/__init__.py index e2c113c..848c175 100644 --- a/src/frostfs_testlib/reporter/__init__.py +++ b/src/frostfs_testlib/reporter/__init__.py @@ -1,3 +1,5 @@ +from typing import Any + from frostfs_testlib.reporter.allure_handler import AllureHandler from frostfs_testlib.reporter.interfaces import ReporterHandler from frostfs_testlib.reporter.reporter import Reporter @@ -20,3 +22,7 @@ def get_reporter() -> Reporter: def step(title: str): return __reporter.step(title) + + +def attach(content: Any, file_name: str): + return __reporter.attach(content, file_name) diff --git a/src/frostfs_testlib/reporter/allure_handler.py b/src/frostfs_testlib/reporter/allure_handler.py index 9089f98..ef63638 100644 --- a/src/frostfs_testlib/reporter/allure_handler.py +++ b/src/frostfs_testlib/reporter/allure_handler.py @@ -21,9 +21,14 @@ class AllureHandler(ReporterHandler): def attach(self, body: Any, file_name: str) -> None: attachment_name, extension = os.path.splitext(file_name) + if extension.startswith("."): + extension = extension[1:] attachment_type = self._resolve_attachment_type(extension) - allure.attach(body, attachment_name, attachment_type, extension) + if 
os.path.exists(body): + allure.attach.file(body, file_name, attachment_type, extension) + else: + allure.attach(body, attachment_name, attachment_type, extension) def _resolve_attachment_type(self, extension: str) -> attachment_type: """Try to find matching Allure attachment type by extension. diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 059e949..e4f2bb2 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -6,7 +6,7 @@ from datetime import datetime from time import sleep from typing import Literal, Optional, Union -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.shell import CommandOptions @@ -15,7 +15,6 @@ from frostfs_testlib.shell.local_shell import LocalShell # TODO: Refactor this code to use shell instead of _cmd_run from frostfs_testlib.utils.cli_utils import _configure_aws_cli -reporter = get_reporter() logger = logging.getLogger("NeoLogger") command_options = CommandOptions(timeout=480) @@ -28,8 +27,10 @@ class AwsCliClient(S3ClientWrapper): common_flags = "--no-verify-ssl --no-paginate" s3gate_endpoint: str - @reporter.step_deco("Configure S3 client (aws cli)") - def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str='default') -> None: + @reporter.step("Configure S3 client (aws cli)") + def __init__( + self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default" + ) -> None: self.s3gate_endpoint = s3gate_endpoint self.profile = profile self.local_shell = LocalShell() @@ -42,11 +43,11 @@ class AwsCliClient(S3ClientWrapper): except Exception as err: raise RuntimeError("Error while configuring AwsCliClient") from err - @reporter.step_deco("Set endpoint S3 to {s3gate_endpoint}") + @reporter.step("Set endpoint S3 to {s3gate_endpoint}") def set_endpoint(self, s3gate_endpoint: str): self.s3gate_endpoint = s3gate_endpoint - @reporter.step_deco("Create bucket S3") + @reporter.step("Create bucket S3") def create_bucket( self, bucket: Optional[str] = None, @@ -85,25 +86,25 @@ class AwsCliClient(S3ClientWrapper): return bucket - @reporter.step_deco("List buckets S3") + @reporter.step("List buckets S3") def list_buckets(self) -> list[str]: cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout buckets_json = self._to_json(output) return [bucket["Name"] for bucket in buckets_json["Buckets"]] - @reporter.step_deco("Delete bucket S3") + @reporter.step("Delete bucket S3") def delete_bucket(self, bucket: str) -> None: cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd, command_options) sleep(S3_SYNC_WAIT_TIME) - @reporter.step_deco("Head bucket S3") + @reporter.step("Head bucket S3") def head_bucket(self, bucket: str) -> None: cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd) - @reporter.step_deco("Put bucket versioning status") + @reporter.step("Put bucket versioning status") def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> 
None: cmd = ( f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} " @@ -112,7 +113,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Get bucket versioning status") + @reporter.step("Get bucket versioning status") def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: cmd = ( f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} " @@ -122,7 +123,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("Status") - @reporter.step_deco("Put bucket tagging") + @reporter.step("Put bucket tagging") def put_bucket_tagging(self, bucket: str, tags: list) -> None: tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]} cmd = ( @@ -131,34 +132,42 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Get bucket tagging") + @reporter.step("Get bucket tagging") def get_bucket_tagging(self, bucket: str) -> list: cmd = ( - f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("TagSet") - @reporter.step_deco("Get bucket acl") + @reporter.step("Get bucket acl") def get_bucket_acl(self, bucket: str) -> list: - cmd = f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Grants") - @reporter.step_deco("Get bucket location") + @reporter.step("Get bucket location") def get_bucket_location(self, bucket: str) -> dict: cmd = ( - f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("LocationConstraint") - @reporter.step_deco("List objects S3") + @reporter.step("List objects S3") def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api list-objects --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -167,9 +176,12 @@ class AwsCliClient(S3ClientWrapper): return response if full_output else obj_list - @reporter.step_deco("List objects S3 v2") + @reporter.step("List objects S3 v2") def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile 
{self.profile}" + ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -178,7 +190,7 @@ class AwsCliClient(S3ClientWrapper): return response if full_output else obj_list - @reporter.step_deco("List objects versions S3") + @reporter.step("List objects versions S3") def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: cmd = ( f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " @@ -188,7 +200,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response if full_output else response.get("Versions", []) - @reporter.step_deco("List objects delete markers S3") + @reporter.step("List objects delete markers S3") def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: cmd = ( f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " @@ -198,7 +210,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response if full_output else response.get("DeleteMarkers", []) - @reporter.step_deco("Copy object S3") + @reporter.step("Copy object S3") def copy_object( self, source_bucket: str, @@ -236,7 +248,7 @@ class AwsCliClient(S3ClientWrapper): self.local_shell.exec(cmd, command_options) return key - @reporter.step_deco("Put object S3") + @reporter.step("Put object S3") def put_object( self, bucket: str, @@ -280,7 +292,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("VersionId") - @reporter.step_deco("Head object S3") + @reporter.step("Head object S3") def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: version = f" --version-id {version_id}" if version_id else "" cmd = ( @@ -291,7 +303,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response - @reporter.step_deco("Get object S3") + @reporter.step("Get object S3") def get_object( self, bucket: str, @@ -312,7 +324,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response if full_output else file_path - @reporter.step_deco("Get object ACL") + @reporter.step("Get object ACL") def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: version = f" --version-id {version_id}" if version_id else "" cmd = ( @@ -323,7 +335,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("Grants") - @reporter.step_deco("Put object ACL") + @reporter.step("Put object ACL") def put_object_acl( self, bucket: str, @@ -346,7 +358,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("Grants") - @reporter.step_deco("Put bucket ACL") + @reporter.step("Put bucket ACL") def put_bucket_acl( self, bucket: str, @@ -354,7 +366,10 @@ class AwsCliClient(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> None: - cmd = f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " + f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) if acl: cmd += f" --acl {acl}" if grant_write: @@ -363,7 +378,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --grant-read {grant_read}" self.local_shell.exec(cmd) - @reporter.step_deco("Delete objects S3") + @reporter.step("Delete objects S3") def delete_objects(self, bucket: str, keys: list[str]) -> dict: file_path = 
os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") delete_structure = json.dumps(_make_objs_dict(keys)) @@ -380,7 +395,7 @@ class AwsCliClient(S3ClientWrapper): sleep(S3_SYNC_WAIT_TIME) return response - @reporter.step_deco("Delete object S3") + @reporter.step("Delete object S3") def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: version = f" --version-id {version_id}" if version_id else "" cmd = ( @@ -391,7 +406,7 @@ class AwsCliClient(S3ClientWrapper): sleep(S3_SYNC_WAIT_TIME) return self._to_json(output) - @reporter.step_deco("Delete object versions S3") + @reporter.step("Delete object versions S3") def delete_object_versions(self, bucket: str, object_versions: list) -> dict: # Build deletion list in S3 format delete_list = { @@ -418,13 +433,13 @@ class AwsCliClient(S3ClientWrapper): sleep(S3_SYNC_WAIT_TIME) return self._to_json(output) - @reporter.step_deco("Delete object versions S3 without delete markers") + @reporter.step("Delete object versions S3 without delete markers") def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: # Delete objects without creating delete markers for object_version in object_versions: self.delete_object(bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"]) - @reporter.step_deco("Get object attributes") + @reporter.step("Get object attributes") def get_object_attributes( self, bucket: str, @@ -456,14 +471,17 @@ class AwsCliClient(S3ClientWrapper): else: return response.get(attributes[0]) - @reporter.step_deco("Get bucket policy") + @reporter.step("Get bucket policy") def get_bucket_policy(self, bucket: str) -> dict: - cmd = f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Policy") - @reporter.step_deco("Put bucket policy") + @reporter.step("Put bucket policy") def put_bucket_policy(self, bucket: str, policy: dict) -> None: # Leaving it as is was in test repo. 
Double dumps to escape resulting string # Example: @@ -478,14 +496,17 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Get bucket cors") + @reporter.step("Get bucket cors") def get_bucket_cors(self, bucket: str) -> dict: - cmd = f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("CORSRules") - @reporter.step_deco("Put bucket cors") + @reporter.step("Put bucket cors") def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: cmd = ( f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " @@ -493,14 +514,15 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Delete bucket cors") + @reporter.step("Delete bucket cors") def delete_bucket_cors(self, bucket: str) -> None: cmd = ( - f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) - @reporter.step_deco("Delete bucket tagging") + @reporter.step("Delete bucket tagging") def delete_bucket_tagging(self, bucket: str) -> None: cmd = ( f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " @@ -508,7 +530,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Put object retention") + @reporter.step("Put object retention") def put_object_retention( self, bucket: str, @@ -526,7 +548,7 @@ class AwsCliClient(S3ClientWrapper): cmd += " --bypass-governance-retention" self.local_shell.exec(cmd) - @reporter.step_deco("Put object legal hold") + @reporter.step("Put object legal hold") def put_object_legal_hold( self, bucket: str, @@ -542,7 +564,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Put object tagging") + @reporter.step("Put object tagging") def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} @@ -552,7 +574,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Get object tagging") + @reporter.step("Get object tagging") def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: version = f" --version-id {version_id}" if version_id else "" cmd = ( @@ -563,7 +585,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("TagSet") - @reporter.step_deco("Delete object tagging") + @reporter.step("Delete object tagging") def delete_object_tagging(self, bucket: str, key: str) -> None: cmd = ( f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " @@ -571,7 +593,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Sync directory S3") + @reporter.step("Sync directory S3") def sync( self, bucket: str, @@ -579,7 +601,10 @@ class AwsCliClient(S3ClientWrapper): acl: Optional[str] = None, metadata: Optional[dict] = None, ) -> dict: - cmd = f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " 
f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) if metadata: cmd += " --metadata" for key, value in metadata.items(): @@ -589,7 +614,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd, command_options).stdout return self._to_json(output) - @reporter.step_deco("CP directory S3") + @reporter.step("CP directory S3") def cp( self, bucket: str, @@ -610,7 +635,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd, command_options).stdout return self._to_json(output) - @reporter.step_deco("Create multipart upload S3") + @reporter.step("Create multipart upload S3") def create_multipart_upload(self, bucket: str, key: str) -> str: cmd = ( f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " @@ -623,7 +648,7 @@ class AwsCliClient(S3ClientWrapper): return response["UploadId"] - @reporter.step_deco("List multipart uploads S3") + @reporter.step("List multipart uploads S3") def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: cmd = ( f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " @@ -633,7 +658,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("Uploads") - @reporter.step_deco("Abort multipart upload S3") + @reporter.step("Abort multipart upload S3") def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: cmd = ( f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} " @@ -641,7 +666,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Upload part S3") + @reporter.step("Upload part S3") def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: cmd = ( f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} " @@ -653,7 +678,7 @@ class AwsCliClient(S3ClientWrapper): assert response.get("ETag"), f"Expected ETag in response:\n{response}" return response["ETag"] - @reporter.step_deco("Upload copy part S3") + @reporter.step("Upload copy part S3") def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: cmd = ( f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} " @@ -666,7 +691,7 @@ class AwsCliClient(S3ClientWrapper): return response["CopyPartResult"]["ETag"] - @reporter.step_deco("List parts S3") + @reporter.step("List parts S3") def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: cmd = ( f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " @@ -679,7 +704,7 @@ class AwsCliClient(S3ClientWrapper): return response["Parts"] - @reporter.step_deco("Complete multipart upload S3") + @reporter.step("Complete multipart upload S3") def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: file_path = os.path.join(os.getcwd(), ASSETS_DIR, "parts.json") parts_dict = {"Parts": [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]} @@ -696,7 +721,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Put object lock configuration") + @reporter.step("Put object lock configuration") def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: cmd = ( f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " 
@@ -705,7 +730,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout return self._to_json(output) - @reporter.step_deco("Get object lock configuration") + @reporter.step("Get object lock configuration") def get_object_lock_configuration(self, bucket: str): cmd = ( f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index ba3716a..bdb177e 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -13,17 +13,11 @@ from botocore.config import Config from botocore.exceptions import ClientError from mypy_boto3_s3 import S3Client -from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.resources.common import ( - ASSETS_DIR, - MAX_REQUEST_ATTEMPTS, - RETRY_MODE, - S3_SYNC_WAIT_TIME, -) +from frostfs_testlib import reporter +from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.utils.cli_utils import log_command_execution -reporter = get_reporter() logger = logging.getLogger("NeoLogger") # Disable warnings on self-signed certificate which the @@ -46,9 +40,11 @@ def report_error(func): class Boto3ClientWrapper(S3ClientWrapper): __repr_name__: str = "Boto3 client" - @reporter.step_deco("Configure S3 client (boto3)") + @reporter.step("Configure S3 client (boto3)") @report_error - def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str='default') -> None: + def __init__( + self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default" + ) -> None: self.boto3_client: S3Client = None self.session = boto3.Session(profile_name=profile) self.config = Config( @@ -62,7 +58,7 @@ class Boto3ClientWrapper(S3ClientWrapper): self.s3gate_endpoint: str = "" self.set_endpoint(s3gate_endpoint) - @reporter.step_deco("Set endpoint S3 to {s3gate_endpoint}") + @reporter.step("Set endpoint S3 to {s3gate_endpoint}") def set_endpoint(self, s3gate_endpoint: str): if self.s3gate_endpoint == s3gate_endpoint: return @@ -90,7 +86,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return result # BUCKET METHODS # - @reporter.step_deco("Create bucket S3") + @reporter.step("Create bucket S3") @report_error def create_bucket( self, @@ -118,16 +114,14 @@ class Boto3ClientWrapper(S3ClientWrapper): elif grant_full_control: params.update({"GrantFullControl": grant_full_control}) if location_constraint: - params.update( - {"CreateBucketConfiguration": {"LocationConstraint": location_constraint}} - ) + params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) s3_bucket = self.boto3_client.create_bucket(**params) log_command_execution(f"Created S3 bucket {bucket}", s3_bucket) sleep(S3_SYNC_WAIT_TIME) return bucket - @reporter.step_deco("List buckets S3") + @reporter.step("List buckets S3") @report_error def list_buckets(self) -> list[str]: found_buckets = [] @@ -140,20 +134,20 @@ class Boto3ClientWrapper(S3ClientWrapper): return found_buckets - @reporter.step_deco("Delete bucket S3") + @reporter.step("Delete bucket S3") @report_error def delete_bucket(self, bucket: str) -> None: response = self.boto3_client.delete_bucket(Bucket=bucket) log_command_execution("S3 Delete bucket result", response) sleep(S3_SYNC_WAIT_TIME) - @reporter.step_deco("Head bucket S3") + 
@reporter.step("Head bucket S3") @report_error def head_bucket(self, bucket: str) -> None: response = self.boto3_client.head_bucket(Bucket=bucket) log_command_execution("S3 Head bucket result", response) - @reporter.step_deco("Put bucket versioning status") + @reporter.step("Put bucket versioning status") @report_error def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: response = self.boto3_client.put_bucket_versioning( @@ -161,7 +155,7 @@ class Boto3ClientWrapper(S3ClientWrapper): ) log_command_execution("S3 Set bucket versioning to", response) - @reporter.step_deco("Get bucket versioning status") + @reporter.step("Get bucket versioning status") @report_error def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: response = self.boto3_client.get_bucket_versioning(Bucket=bucket) @@ -169,7 +163,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Got bucket versioning status", response) return status - @reporter.step_deco("Put bucket tagging") + @reporter.step("Put bucket tagging") @report_error def put_bucket_tagging(self, bucket: str, tags: list) -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] @@ -177,27 +171,27 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.put_bucket_tagging(Bucket=bucket, Tagging=tagging) log_command_execution("S3 Put bucket tagging", response) - @reporter.step_deco("Get bucket tagging") + @reporter.step("Get bucket tagging") @report_error def get_bucket_tagging(self, bucket: str) -> list: response = self.boto3_client.get_bucket_tagging(Bucket=bucket) log_command_execution("S3 Get bucket tagging", response) return response.get("TagSet") - @reporter.step_deco("Get bucket acl") + @reporter.step("Get bucket acl") @report_error def get_bucket_acl(self, bucket: str) -> list: response = self.boto3_client.get_bucket_acl(Bucket=bucket) log_command_execution("S3 Get bucket acl", response) return response.get("Grants") - @reporter.step_deco("Delete bucket tagging") + @reporter.step("Delete bucket tagging") @report_error def delete_bucket_tagging(self, bucket: str) -> None: response = self.boto3_client.delete_bucket_tagging(Bucket=bucket) log_command_execution("S3 Delete bucket tagging", response) - @reporter.step_deco("Put bucket ACL") + @reporter.step("Put bucket ACL") @report_error def put_bucket_acl( self, @@ -214,60 +208,56 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.put_bucket_acl(**params) log_command_execution("S3 ACL bucket result", response) - @reporter.step_deco("Put object lock configuration") + @reporter.step("Put object lock configuration") @report_error def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: - response = self.boto3_client.put_object_lock_configuration( - Bucket=bucket, ObjectLockConfiguration=configuration - ) + response = self.boto3_client.put_object_lock_configuration(Bucket=bucket, ObjectLockConfiguration=configuration) log_command_execution("S3 put_object_lock_configuration result", response) return response - @reporter.step_deco("Get object lock configuration") + @reporter.step("Get object lock configuration") @report_error def get_object_lock_configuration(self, bucket: str) -> dict: response = self.boto3_client.get_object_lock_configuration(Bucket=bucket) log_command_execution("S3 get_object_lock_configuration result", response) return response.get("ObjectLockConfiguration") - @reporter.step_deco("Get bucket policy") + @reporter.step("Get 
bucket policy") @report_error def get_bucket_policy(self, bucket: str) -> str: response = self.boto3_client.get_bucket_policy(Bucket=bucket) log_command_execution("S3 get_bucket_policy result", response) return response.get("Policy") - @reporter.step_deco("Put bucket policy") + @reporter.step("Put bucket policy") @report_error def put_bucket_policy(self, bucket: str, policy: dict) -> None: response = self.boto3_client.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy)) log_command_execution("S3 put_bucket_policy result", response) return response - @reporter.step_deco("Get bucket cors") + @reporter.step("Get bucket cors") @report_error def get_bucket_cors(self, bucket: str) -> dict: response = self.boto3_client.get_bucket_cors(Bucket=bucket) log_command_execution("S3 get_bucket_cors result", response) return response.get("CORSRules") - @reporter.step_deco("Get bucket location") + @reporter.step("Get bucket location") @report_error def get_bucket_location(self, bucket: str) -> str: response = self.boto3_client.get_bucket_location(Bucket=bucket) log_command_execution("S3 get_bucket_location result", response) return response.get("LocationConstraint") - @reporter.step_deco("Put bucket cors") + @reporter.step("Put bucket cors") @report_error def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: - response = self.boto3_client.put_bucket_cors( - Bucket=bucket, CORSConfiguration=cors_configuration - ) + response = self.boto3_client.put_bucket_cors(Bucket=bucket, CORSConfiguration=cors_configuration) log_command_execution("S3 put_bucket_cors result", response) return response - @reporter.step_deco("Delete bucket cors") + @reporter.step("Delete bucket cors") @report_error def delete_bucket_cors(self, bucket: str) -> None: response = self.boto3_client.delete_bucket_cors(Bucket=bucket) @@ -276,7 +266,7 @@ class Boto3ClientWrapper(S3ClientWrapper): # END OF BUCKET METHODS # # OBJECT METHODS # - @reporter.step_deco("List objects S3 v2") + @reporter.step("List objects S3 v2") @report_error def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: response = self.boto3_client.list_objects_v2(Bucket=bucket) @@ -287,7 +277,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response if full_output else obj_list - @reporter.step_deco("List objects S3") + @reporter.step("List objects S3") @report_error def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: response = self.boto3_client.list_objects(Bucket=bucket) @@ -298,21 +288,21 @@ class Boto3ClientWrapper(S3ClientWrapper): return response if full_output else obj_list - @reporter.step_deco("List objects versions S3") + @reporter.step("List objects versions S3") @report_error def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: response = self.boto3_client.list_object_versions(Bucket=bucket) log_command_execution("S3 List objects versions result", response) return response if full_output else response.get("Versions", []) - @reporter.step_deco("List objects delete markers S3") + @reporter.step("List objects delete markers S3") @report_error def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: response = self.boto3_client.list_object_versions(Bucket=bucket) log_command_execution("S3 List objects delete markers result", response) return response if full_output else response.get("DeleteMarkers", []) - @reporter.step_deco("Put object S3") + @reporter.step("Put object S3") @report_error def put_object( self, @@ 
-343,7 +333,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Put object result", response) return response.get("VersionId") - @reporter.step_deco("Head object S3") + @reporter.step("Head object S3") @report_error def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: params = { @@ -355,7 +345,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Head object result", response) return response - @reporter.step_deco("Delete object S3") + @reporter.step("Delete object S3") @report_error def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: params = { @@ -368,7 +358,7 @@ class Boto3ClientWrapper(S3ClientWrapper): sleep(S3_SYNC_WAIT_TIME) return response - @reporter.step_deco("Delete objects S3") + @reporter.step("Delete objects S3") @report_error def delete_objects(self, bucket: str, keys: list[str]) -> dict: response = self.boto3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(keys)) @@ -379,7 +369,7 @@ class Boto3ClientWrapper(S3ClientWrapper): sleep(S3_SYNC_WAIT_TIME) return response - @reporter.step_deco("Delete object versions S3") + @reporter.step("Delete object versions S3") @report_error def delete_object_versions(self, bucket: str, object_versions: list) -> dict: # Build deletion list in S3 format @@ -396,7 +386,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Delete objects result", response) return response - @reporter.step_deco("Delete object versions S3 without delete markers") + @reporter.step("Delete object versions S3 without delete markers") @report_error def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: # Delete objects without creating delete markers @@ -406,7 +396,7 @@ class Boto3ClientWrapper(S3ClientWrapper): ) log_command_execution("S3 Delete object result", response) - @reporter.step_deco("Put object ACL") + @reporter.step("Put object ACL") @report_error def put_object_acl( self, @@ -419,7 +409,7 @@ class Boto3ClientWrapper(S3ClientWrapper): # pytest.skip("Method put_object_acl is not supported by boto3 client") raise NotImplementedError("Unsupported for boto3 client") - @reporter.step_deco("Get object ACL") + @reporter.step("Get object ACL") @report_error def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: params = { @@ -431,7 +421,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 ACL objects result", response) return response.get("Grants") - @reporter.step_deco("Copy object S3") + @reporter.step("Copy object S3") @report_error def copy_object( self, @@ -460,7 +450,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Copy objects result", response) return key - @reporter.step_deco("Get object S3") + @reporter.step("Get object S3") @report_error def get_object( self, @@ -478,8 +468,7 @@ class Boto3ClientWrapper(S3ClientWrapper): params = { self._to_s3_param(param): value for param, value in {**locals(), **{"Range": range_str}}.items() - if param not in ["self", "object_range", "full_output", "range_str", "filename"] - and value is not None + if param not in ["self", "object_range", "full_output", "range_str", "filename"] and value is not None } response = self.boto3_client.get_object(**params) log_command_execution("S3 Get objects result", response) @@ -491,7 +480,7 @@ class Boto3ClientWrapper(S3ClientWrapper): chunk = response["Body"].read(1024) return response if full_output else filename - 
@reporter.step_deco("Create multipart upload S3") + @reporter.step("Create multipart upload S3") @report_error def create_multipart_upload(self, bucket: str, key: str) -> str: response = self.boto3_client.create_multipart_upload(Bucket=bucket, Key=key) @@ -500,7 +489,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response["UploadId"] - @reporter.step_deco("List multipart uploads S3") + @reporter.step("List multipart uploads S3") @report_error def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: response = self.boto3_client.list_multipart_uploads(Bucket=bucket) @@ -508,19 +497,15 @@ class Boto3ClientWrapper(S3ClientWrapper): return response.get("Uploads") - @reporter.step_deco("Abort multipart upload S3") + @reporter.step("Abort multipart upload S3") @report_error def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: - response = self.boto3_client.abort_multipart_upload( - Bucket=bucket, Key=key, UploadId=upload_id - ) + response = self.boto3_client.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id) log_command_execution("S3 Abort multipart upload", response) - @reporter.step_deco("Upload part S3") + @reporter.step("Upload part S3") @report_error - def upload_part( - self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str - ) -> str: + def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: with open(filepath, "rb") as put_file: body = put_file.read() @@ -536,11 +521,9 @@ class Boto3ClientWrapper(S3ClientWrapper): return response["ETag"] - @reporter.step_deco("Upload copy part S3") + @reporter.step("Upload copy part S3") @report_error - def upload_part_copy( - self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str - ) -> str: + def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: response = self.boto3_client.upload_part_copy( UploadId=upload_id, Bucket=bucket, @@ -549,13 +532,11 @@ class Boto3ClientWrapper(S3ClientWrapper): CopySource=copy_source, ) log_command_execution("S3 Upload copy part", response) - assert response.get("CopyPartResult", []).get( - "ETag" - ), f"Expected ETag in response:\n{response}" + assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" return response["CopyPartResult"]["ETag"] - @reporter.step_deco("List parts S3") + @reporter.step("List parts S3") @report_error def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: response = self.boto3_client.list_parts(UploadId=upload_id, Bucket=bucket, Key=key) @@ -564,7 +545,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response["Parts"] - @reporter.step_deco("Complete multipart upload S3") + @reporter.step("Complete multipart upload S3") @report_error def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] @@ -573,7 +554,7 @@ class Boto3ClientWrapper(S3ClientWrapper): ) log_command_execution("S3 Complete multipart upload", response) - @reporter.step_deco("Put object retention") + @reporter.step("Put object retention") @report_error def put_object_retention( self, @@ -591,7 +572,7 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.put_object_retention(**params) log_command_execution("S3 Put object retention ", response) - @reporter.step_deco("Put object legal hold") + @reporter.step("Put object legal hold") 
@report_error def put_object_legal_hold( self, @@ -609,7 +590,7 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.put_object_legal_hold(**params) log_command_execution("S3 Put object legal hold ", response) - @reporter.step_deco("Put object tagging") + @reporter.step("Put object tagging") @report_error def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] @@ -617,7 +598,7 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging) log_command_execution("S3 Put object tagging", response) - @reporter.step_deco("Get object tagging") + @reporter.step("Get object tagging") @report_error def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: params = { @@ -629,13 +610,13 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Get object tagging", response) return response.get("TagSet") - @reporter.step_deco("Delete object tagging") + @reporter.step("Delete object tagging") @report_error def delete_object_tagging(self, bucket: str, key: str) -> None: response = self.boto3_client.delete_object_tagging(Bucket=bucket, Key=key) log_command_execution("S3 Delete object tagging", response) - @reporter.step_deco("Get object attributes") + @reporter.step("Get object attributes") @report_error def get_object_attributes( self, @@ -650,7 +631,7 @@ class Boto3ClientWrapper(S3ClientWrapper): logger.warning("Method get_object_attributes is not supported by boto3 client") return {} - @reporter.step_deco("Sync directory S3") + @reporter.step("Sync directory S3") @report_error def sync( self, @@ -661,7 +642,7 @@ class Boto3ClientWrapper(S3ClientWrapper): ) -> dict: raise NotImplementedError("Sync is not supported for boto3 client") - @reporter.step_deco("CP directory S3") + @reporter.step("CP directory S3") @report_error def cp( self, diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index 26c7e9b..acf01ff 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -6,11 +6,10 @@ from typing import IO, Optional import pexpect -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell logger = logging.getLogger("frostfs.testlib.shell") -reporter = get_reporter() class LocalShell(Shell): diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index 6b12f81..a7e6e1d 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -9,11 +9,10 @@ from typing import ClassVar, Optional, Tuple from paramiko import AutoAddPolicy, Channel, ECDSAKey, Ed25519Key, PKey, RSAKey, SSHClient, SSHException, ssh_exception from paramiko.ssh_exception import AuthenticationException -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell, SshCredentials logger = logging.getLogger("frostfs.testlib.shell") -reporter = get_reporter() class SshConnectionProvider: diff --git a/src/frostfs_testlib/steps/acl.py b/src/frostfs_testlib/steps/acl.py index 0ef101b..e97e4ee 100644 --- a/src/frostfs_testlib/steps/acl.py +++ b/src/frostfs_testlib/steps/acl.py @@ -8,8 +8,8 @@ from 
typing import List, Optional, Union import base58 +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell @@ -22,11 +22,10 @@ from frostfs_testlib.storage.dataclasses.acl import ( ) from frostfs_testlib.utils import wallet_utils -reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Get extended ACL") +@reporter.step("Get extended ACL") def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]: cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) try: @@ -40,7 +39,7 @@ def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optiona return result.stdout -@reporter.step_deco("Set extended ACL") +@reporter.step("Set extended ACL") def set_eacl( wallet_path: str, cid: str, @@ -165,24 +164,20 @@ def eacl_rules(access: str, verbs: list, user: str) -> list[str]: return rules -def sign_bearer( - shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool -) -> None: - frostfscli = FrostfsCli( - shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG - ) +def sign_bearer(shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None: + frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG) frostfscli.util.sign_bearer_token( wallet=wallet_path, from_file=eacl_rules_file_from, to_file=eacl_rules_file_to, json=json ) -@reporter.step_deco("Wait for eACL cache expired") +@reporter.step("Wait for eACL cache expired") def wait_for_cache_expired(): sleep(FROSTFS_CONTRACT_CACHE_TIMEOUT) return -@reporter.step_deco("Return bearer token in base64 to caller") +@reporter.step("Return bearer token in base64 to caller") def bearer_token_base64_from_file( bearer_path: str, ) -> str: diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index 74f445a..be96138 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -5,8 +5,8 @@ from dataclasses import dataclass from time import sleep from typing import Optional, Union +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell @@ -17,7 +17,6 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.utils import json_utils from frostfs_testlib.utils.file_utils import generate_file, get_file_hash -reporter = get_reporter() logger = logging.getLogger("NeoLogger") @@ -47,7 +46,7 @@ class StorageContainer: def get_wallet_config_path(self) -> str: return self.storage_container_info.wallet_file.config_path - @reporter.step_deco("Generate new object and put in container") + @reporter.step("Generate new object and put in container") def generate_object( self, size: int, @@ -103,7 +102,7 @@ SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" -@reporter.step_deco("Create Container") +@reporter.step("Create 
Container") def create_container( wallet: str, shell: Shell, @@ -178,9 +177,7 @@ def wait_for_container_creation( return logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue") sleep(sleep_interval) - raise RuntimeError( - f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting" - ) + raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting") def wait_for_container_deletion( @@ -198,7 +195,7 @@ def wait_for_container_deletion( raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.") -@reporter.step_deco("List Containers") +@reporter.step("List Containers") def list_containers( wallet: str, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT ) -> list[str]: @@ -219,7 +216,7 @@ def list_containers( return result.stdout.split() -@reporter.step_deco("List Objects in container") +@reporter.step("List Objects in container") def list_objects( wallet: str, shell: Shell, @@ -240,14 +237,12 @@ def list_objects( (list): list of containers """ cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) - result = cli.container.list_objects( - rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout - ) + result = cli.container.list_objects(rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout) logger.info(f"Container objects: \n{result}") return result.stdout.split() -@reporter.step_deco("Get Container") +@reporter.step("Get Container") def get_container( wallet: str, cid: str, @@ -271,9 +266,7 @@ def get_container( """ cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) - result = cli.container.get( - rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout - ) + result = cli.container.get(rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout) if not json_mode: return result.stdout @@ -287,7 +280,7 @@ def get_container( return container_info -@reporter.step_deco("Delete Container") +@reporter.step("Delete Container") # TODO: make the error message about a non-found container more user-friendly def delete_container( wallet: str, @@ -350,7 +343,7 @@ def _parse_cid(output: str) -> str: return splitted[1] -@reporter.step_deco("Search container by name") +@reporter.step("Search container by name") def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str): list_cids = list_containers(wallet, shell, endpoint) for cid in list_cids: @@ -360,7 +353,7 @@ def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str return None -@reporter.step_deco("Search for nodes with a container") +@reporter.step("Search for nodes with a container") def search_nodes_with_container( wallet: str, cid: str, @@ -370,9 +363,7 @@ def search_nodes_with_container( timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> list[ClusterNode]: cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) - result = cli.container.search_node( - rpc_endpoint=endpoint, wallet=wallet, cid=cid, timeout=timeout - ) + result = cli.container.search_node(rpc_endpoint=endpoint, wallet=wallet, cid=cid, timeout=timeout) pattern = r"[0-9]+(?:\.[0-9]+){3}" nodes_ip = list(set(re.findall(pattern, result.stdout))) diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 9c7c694..803524a 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ 
-5,9 +5,9 @@ import re import uuid from typing import Any, Optional +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.cli.neogo import NeoGo -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell @@ -16,10 +16,9 @@ from frostfs_testlib.utils import json_utils from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output logger = logging.getLogger("NeoLogger") -reporter = get_reporter() -@reporter.step_deco("Get object from random node") +@reporter.step("Get object from random node") def get_object_from_random_node( wallet: str, cid: str, @@ -70,7 +69,7 @@ def get_object_from_random_node( ) -@reporter.step_deco("Get object from {endpoint}") +@reporter.step("Get object from {endpoint}") def get_object( wallet: str, cid: str, @@ -126,7 +125,7 @@ def get_object( return file_path -@reporter.step_deco("Get Range Hash from {endpoint}") +@reporter.step("Get Range Hash from {endpoint}") def get_range_hash( wallet: str, cid: str, @@ -176,7 +175,7 @@ def get_range_hash( return result.stdout.split(":")[1].strip() -@reporter.step_deco("Put object to random node") +@reporter.step("Put object to random node") def put_object_to_random_node( wallet: str, path: str, @@ -235,7 +234,7 @@ def put_object_to_random_node( ) -@reporter.step_deco("Put object at {endpoint} in container {cid}") +@reporter.step("Put object at {endpoint} in container {cid}") def put_object( wallet: str, path: str, @@ -296,7 +295,7 @@ def put_object( return oid.strip() -@reporter.step_deco("Delete object {cid}/{oid} from {endpoint}") +@reporter.step("Delete object {cid}/{oid} from {endpoint}") def delete_object( wallet: str, cid: str, @@ -344,7 +343,7 @@ def delete_object( return tombstone.strip() -@reporter.step_deco("Get Range") +@reporter.step("Get Range") def get_range( wallet: str, cid: str, @@ -397,7 +396,7 @@ def get_range( return range_file_path, content -@reporter.step_deco("Lock Object") +@reporter.step("Lock Object") def lock_object( wallet: str, cid: str, @@ -458,7 +457,7 @@ def lock_object( return oid.strip() -@reporter.step_deco("Search object") +@reporter.step("Search object") def search_object( wallet: str, cid: str, @@ -503,9 +502,7 @@ def search_object( cid=cid, bearer=bearer, xhdr=xhdr, - filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] - if filters - else None, + filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None, session=session, phy=phy, root=root, @@ -517,19 +514,17 @@ def search_object( if expected_objects_list: if sorted(found_objects) == sorted(expected_objects_list): logger.info( - f"Found objects list '{found_objects}' " - f"is equal for expected list '{expected_objects_list}'" + f"Found objects list '{found_objects}' " f"is equal to expected list '{expected_objects_list}'" ) else: logger.warning( - f"Found object list {found_objects} " - f"is not equal to expected list '{expected_objects_list}'" + f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'" ) return found_objects -@reporter.step_deco("Get netmap netinfo") +@reporter.step("Get netmap netinfo") def get_netmap_netinfo( wallet: str, shell: Shell, @@ -581,7 +576,7 @@ def get_netmap_netinfo( return settings -@reporter.step_deco("Head 
object") +@reporter.step("Head object") def head_object( wallet: str, cid: str, @@ -677,7 +672,7 @@ def head_object( return json_utils.decode_simple_header(decoded) -@reporter.step_deco("Run neo-go dump-keys") +@reporter.step("Run neo-go dump-keys") def neo_go_dump_keys(shell: Shell, wallet: str) -> dict: """ Run neo-go dump keys command @@ -702,7 +697,7 @@ def neo_go_dump_keys(shell: Shell, wallet: str) -> dict: return {address_id: wallet_key} -@reporter.step_deco("Run neo-go query height") +@reporter.step("Run neo-go query height") def neo_go_query_height(shell: Shell, endpoint: str) -> dict: """ Run neo-go query height command @@ -734,7 +729,7 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict: } -@reporter.step_deco("Search object nodes") +@reporter.step("Search object nodes") def get_object_nodes( cluster: Cluster, wallet: str, diff --git a/src/frostfs_testlib/steps/complex_object_actions.py b/src/frostfs_testlib/steps/complex_object_actions.py index 54e5fc2..a67dd4c 100644 --- a/src/frostfs_testlib/steps/complex_object_actions.py +++ b/src/frostfs_testlib/steps/complex_object_actions.py @@ -12,7 +12,7 @@ import logging from typing import Optional, Tuple -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell @@ -20,7 +20,6 @@ from frostfs_testlib.steps.cli.object import head_object from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo -reporter = get_reporter() logger = logging.getLogger("NeoLogger") @@ -113,7 +112,7 @@ def get_complex_object_split_ranges( return ranges -@reporter.step_deco("Get Link Object") +@reporter.step("Get Link Object") def get_link_object( wallet: str, cid: str, @@ -166,7 +165,7 @@ def get_link_object( return None -@reporter.step_deco("Get Last Object") +@reporter.step("Get Last Object") def get_last_object( wallet: str, cid: str, diff --git a/src/frostfs_testlib/steps/epoch.py b/src/frostfs_testlib/steps/epoch.py index a589569..5a43ba3 100644 --- a/src/frostfs_testlib/steps/epoch.py +++ b/src/frostfs_testlib/steps/epoch.py @@ -2,8 +2,8 @@ import logging from time import sleep from typing import Optional +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import ( CLI_DEFAULT_TIMEOUT, FROSTFS_ADM_CONFIG_PATH, @@ -19,11 +19,10 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import InnerRing, Morp from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils import datetime_utils, wallet_utils -reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Get epochs from nodes") +@reporter.step("Get epochs from nodes") def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]: """ Get current epochs on each node. 
@@ -41,10 +40,8 @@ def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]: return epochs_by_node -@reporter.step_deco("Ensure fresh epoch") -def ensure_fresh_epoch( - shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None -) -> int: +@reporter.step("Ensure fresh epoch") +def ensure_fresh_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None) -> int: # ensure new fresh epoch to avoid epoch switch during test session alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] current_epoch = get_epoch(shell, cluster, alive_node) @@ -54,7 +51,7 @@ def ensure_fresh_epoch( return epoch -@reporter.step_deco("Wait up to {timeout} seconds for nodes on cluster to align epochs") +@reporter.step("Wait up to {timeout} seconds for nodes on cluster to align epochs") def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60): @wait_for_success(timeout, 5, None, True) def check_epochs(): @@ -64,7 +61,7 @@ def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60): check_epochs() -@reporter.step_deco("Get Epoch") +@reporter.step("Get Epoch") def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] endpoint = alive_node.get_rpc_endpoint() @@ -77,7 +74,7 @@ def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] return int(epoch.stdout) -@reporter.step_deco("Tick Epoch") +@reporter.step("Tick Epoch") def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): """ Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv) diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py index 2b70d6c..a8c9899 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -10,7 +10,7 @@ from urllib.parse import quote_plus import requests -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE from frostfs_testlib.s3.aws_cli_client import command_options from frostfs_testlib.shell import Shell @@ -21,15 +21,13 @@ from frostfs_testlib.storage.cluster import StorageNode from frostfs_testlib.testing.test_control import retry from frostfs_testlib.utils.file_utils import get_file_hash -reporter = get_reporter() - logger = logging.getLogger("NeoLogger") ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/") local_shell = LocalShell() -@reporter.step_deco("Get via HTTP Gate") +@reporter.step("Get via HTTP Gate") def get_via_http_gate( cid: str, oid: str, @@ -53,9 +51,7 @@ def get_via_http_gate( else: request = f"{endpoint}{request_path}" - resp = requests.get( - request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False - ) + resp = requests.get(request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False) if not resp.ok: raise Exception( @@ -75,10 +71,8 @@ def get_via_http_gate( return file_path -@reporter.step_deco("Get via Zip HTTP Gate") -def get_via_zip_http_gate( - cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300 -): +@reporter.step("Get via Zip HTTP Gate") +def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300): """ This function gets given object from HTTP gate cid: container id to get object from @@ -111,7 
+105,7 @@ def get_via_zip_http_gate( return os.path.join(os.getcwd(), ASSETS_DIR, prefix) -@reporter.step_deco("Get via HTTP Gate by attribute") +@reporter.step("Get via HTTP Gate by attribute") def get_via_http_gate_by_attribute( cid: str, attribute: dict, @@ -136,9 +130,7 @@ def get_via_http_gate_by_attribute( else: request = f"{endpoint}{request_path}" - resp = requests.get( - request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname} - ) + resp = requests.get(request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname}) if not resp.ok: raise Exception( @@ -159,7 +151,7 @@ def get_via_http_gate_by_attribute( # TODO: pass http_hostname as a header -@reporter.step_deco("Upload via HTTP Gate") +@reporter.step("Upload via HTTP Gate") def upload_via_http_gate( cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300 ) -> str: @@ -173,9 +165,7 @@ def upload_via_http_gate( request = f"{endpoint}/upload/{cid}" files = {"upload_file": open(path, "rb")} body = {"filename": path} - resp = requests.post( - request, files=files, data=body, headers=headers, timeout=timeout, verify=False - ) + resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout, verify=False) if not resp.ok: raise Exception( @@ -193,7 +183,7 @@ def upload_via_http_gate( return resp.json().get("object_id") -@reporter.step_deco("Check is the passed object large") +@reporter.step("Check if the passed object is large") def is_object_large(filepath: str) -> bool: """ This function check passed file size and return True if file_size > SIMPLE_OBJECT_SIZE @@ -208,7 +198,7 @@ def is_object_large( # TODO: pass http_hostname as a header -@reporter.step_deco("Upload via HTTP Gate using Curl") +@reporter.step("Upload via HTTP Gate using Curl") def upload_via_http_gate_curl( cid: str, filepath: str, @@ -256,7 +246,7 @@ def upload_via_http_gate_curl( @retry(max_attempts=3, sleep_interval=1) -@reporter.step_deco("Get via HTTP Gate using Curl") +@reporter.step("Get via HTTP Gate using Curl") def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> str: """ This function gets given object from HTTP gate using curl utility. 
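get_via_http_curl above is wrapped in @retry(max_attempts=3, sleep_interval=1), and the network helpers later in this series call the same decorator with expected_result="UP". A rough sketch of such a decorator, assuming it retries both on exceptions and, when expected_result is given, on mismatching return values; the actual implementation lives in frostfs_testlib.testing.test_control and may differ.

import functools
import time


def retry(max_attempts: int, sleep_interval: float, expected_result=None):
    # Re-invoke the wrapped callable until it stops raising and, when an
    # expected_result is supplied, until the return value matches it.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            last_error = None
            for attempt in range(max_attempts):
                try:
                    result = func(*args, **kwargs)
                    if expected_result is None or result == expected_result:
                        return result
                    last_error = AssertionError(
                        f"Attempt {attempt + 1}: got {result!r}, expected {expected_result!r}"
                    )
                except Exception as err:
                    last_error = err
                if attempt + 1 < max_attempts:
                    time.sleep(sleep_interval)
            raise last_error
        return wrapper
    return decorator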
@@ -280,7 +270,7 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"): reporter.attach(command_attachment, f"{req_type} Request") -@reporter.step_deco("Try to get object and expect error") +@reporter.step("Try to get object and expect error") def try_to_get_object_and_expect_error( cid: str, oid: str, @@ -296,7 +286,7 @@ def try_to_get_object_and_expect_error( assert match, f"Expected {err} to match {error_pattern}" -@reporter.step_deco("Verify object can be get using HTTP header attribute") +@reporter.step("Verify object can be retrieved using HTTP header attribute") def get_object_by_attr_and_verify_hashes( oid: str, file_name: str, @@ -305,9 +295,7 @@ def get_object_by_attr_and_verify_hashes( endpoint: str, http_hostname: str, ) -> None: - got_file_path_http = get_via_http_gate( - cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname - ) + got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) got_file_path_http_attr = get_via_http_gate_by_attribute( cid=cid, attribute=attrs, endpoint=endpoint, http_hostname=http_hostname ) @@ -348,9 +336,7 @@ def verify_object_hash( shell=shell, endpoint=random_node.get_rpc_endpoint(), ) - got_file_path_http = object_getter( - cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname - ) + got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) assert_hashes_are_equal(file_name, got_file_path, got_file_path_http) @@ -359,18 +345,14 @@ def assert_hashes_are_equal(orig_file_name: str, got_file_1: str, got_file_2: st msg = "Expected hashes are equal for files {f1} and {f2}" got_file_hash_http = get_file_hash(got_file_1) assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1) - assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format( - f1=orig_file_name, f2=got_file_1 - ) + assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(f1=orig_file_name, f2=got_file_1) def attr_into_header(attrs: dict) -> dict: return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()} -@reporter.step_deco( - "Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'" -) +@reporter.step("Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'") def attr_into_str_header_curl(attrs: dict) -> list: headers = [] for k, v in attrs.items(): @@ -379,9 +361,7 @@ def attr_into_str_header_curl(attrs: dict) -> list: return headers -@reporter.step_deco( - "Try to get object via http (pass http_request and optional attributes) and expect error" -) +@reporter.step("Try to get object via http (pass http_request and optional attributes) and expect error") def try_to_get_object_via_passed_request_and_expect_error( cid: str, oid: str, diff --git a/src/frostfs_testlib/steps/network.py b/src/frostfs_testlib/steps/network.py index a865461..64e235a 100644 --- a/src/frostfs_testlib/steps/network.py +++ b/src/frostfs_testlib/steps/network.py @@ -1,9 +1,7 @@ -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.testing.test_control import retry -reporter = get_reporter() - class IpTablesHelper: @staticmethod @@ -21,11 +19,7 @@ class IpTablesHelper: @staticmethod def restore_input_traffic_to_port(node: ClusterNode) -> None: shell = node.host.get_shell() - ports = ( - shell.exec("iptables -L --numeric | grep DROP | awk 
'{print $7}'") - .stdout.strip() - .split("\n") - ) + ports = shell.exec("iptables -L --numeric | grep DROP | awk '{print $7}'").stdout.strip().split("\n") if ports[0] == "": return for port in ports: @@ -34,11 +28,7 @@ class IpTablesHelper: @staticmethod def restore_input_traffic_to_node(node: ClusterNode) -> None: shell = node.host.get_shell() - unlock_ip = ( - shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'") - .stdout.strip() - .split("\n") - ) + unlock_ip = shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'").stdout.strip().split("\n") if unlock_ip[0] == "": return for ip in unlock_ip: @@ -47,17 +37,17 @@ class IpTablesHelper: # TODO Move class to HOST class IfUpDownHelper: - @reporter.step_deco("Down {interface} to {node}") + @reporter.step("Down {interface} to {node}") def down_interface(self, node: ClusterNode, interface: str) -> None: shell = node.host.get_shell() shell.exec(f"ifdown {interface}") - @reporter.step_deco("Up {interface} to {node}") + @reporter.step("Up {interface} to {node}") def up_interface(self, node: ClusterNode, interface: str) -> None: shell = node.host.get_shell() shell.exec(f"ifup {interface}") - @reporter.step_deco("Up all interface to {node}") + @reporter.step("Up all interface to {node}") def up_all_interface(self, node: ClusterNode) -> None: shell = node.host.get_shell() interfaces = list(node.host.config.interfaces.keys()) @@ -65,7 +55,7 @@ class IfUpDownHelper: for name_interface in interfaces: self.check_state_up(node, name_interface) - @reporter.step_deco("Down all interface to {node}") + @reporter.step("Down all interface to {node}") def down_all_interface(self, node: ClusterNode) -> None: shell = node.host.get_shell() interfaces = list(node.host.config.interfaces.keys()) @@ -73,12 +63,10 @@ class IfUpDownHelper: for name_interface in interfaces: self.check_state_down(node, name_interface) - @reporter.step_deco("Check {node} to {interface}") + @reporter.step("Check {node} to {interface}") def check_state(self, node: ClusterNode, interface: str) -> str: shell = node.host.get_shell() - return shell.exec( - f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'" - ).stdout.strip() + return shell.exec(f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'").stdout.strip() @retry(max_attempts=5, sleep_interval=5, expected_result="UP") def check_state_up(self, node: ClusterNode, interface: str) -> str: diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py index d91721c..28e3820 100644 --- a/src/frostfs_testlib/steps/node_management.py +++ b/src/frostfs_testlib/steps/node_management.py @@ -6,13 +6,9 @@ from dataclasses import dataclass from time import sleep from typing import Optional +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli -from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.resources.cli import ( - FROSTFS_ADM_CONFIG_PATH, - FROSTFS_ADM_EXEC, - FROSTFS_CLI_EXEC, -) +from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align @@ -20,7 +16,6 @@ from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate from frostfs_testlib.utils import datetime_utils -reporter = get_reporter() logger = 
logging.getLogger("NeoLogger") @@ -40,7 +35,7 @@ class HealthStatus: return HealthStatus(network, health) -@reporter.step_deco("Get Locode from random storage node") +@reporter.step("Get Locode from random storage node") def get_locode_from_random_node(cluster: Cluster) -> str: node = random.choice(cluster.services(StorageNode)) locode = node.get_un_locode() @@ -48,7 +43,7 @@ def get_locode_from_random_node(cluster: Cluster) -> str: return locode -@reporter.step_deco("Healthcheck for storage node {node}") +@reporter.step("Healthcheck for storage node {node}") def storage_node_healthcheck(node: StorageNode) -> HealthStatus: """ The function returns storage node's health status. @@ -62,7 +57,7 @@ def storage_node_healthcheck(node: StorageNode) -> HealthStatus: return HealthStatus.from_stdout(output) -@reporter.step_deco("Set status for {node}") +@reporter.step("Set status for {node}") def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None: """ The function sets particular status for given node. @@ -75,7 +70,7 @@ def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> _run_control_command_with_retries(node, command, retries) -@reporter.step_deco("Get netmap snapshot") +@reporter.step("Get netmap snapshot") def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: """ The function returns string representation of netmap snapshot. @@ -95,7 +90,7 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: ).stdout -@reporter.step_deco("Get shard list for {node}") +@reporter.step("Get shard list for {node}") def node_shard_list(node: StorageNode) -> list[str]: """ The function returns list of shards for specified storage node. @@ -109,7 +104,7 @@ def node_shard_list(node: StorageNode) -> list[str]: return re.findall(r"Shard (.*):", output) -@reporter.step_deco("Shard set for {node}") +@reporter.step("Shard set for {node}") def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str: """ The function sets mode for specified shard. @@ -120,7 +115,7 @@ def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str: return _run_control_command_with_retries(node, command) -@reporter.step_deco("Drop object from {node}") +@reporter.step("Drop object from {node}") def drop_object(node: StorageNode, cid: str, oid: str) -> str: """ The function drops object from specified node. 
@@ -131,14 +126,14 @@ def drop_object(node: StorageNode, cid: str, oid: str) -> str: return _run_control_command_with_retries(node, command) -@reporter.step_deco("Delete data from host for node {node}") +@reporter.step("Delete data from host for node {node}") def delete_node_data(node: StorageNode) -> None: node.stop_service() node.host.delete_storage_node_data(node.name) time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) -@reporter.step_deco("Exclude node {node_to_exclude} from network map") +@reporter.step("Exclude node {node_to_exclude} from network map") def exclude_node_from_network_map( node_to_exclude: StorageNode, alive_node: StorageNode, @@ -154,12 +149,10 @@ def exclude_node_from_network_map( wait_for_epochs_align(shell, cluster) snapshot = get_netmap_snapshot(node=alive_node, shell=shell) - assert ( - node_netmap_key not in snapshot - ), f"Expected node with key {node_netmap_key} to be absent in network map" + assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be absent in network map" -@reporter.step_deco("Include node {node_to_include} into network map") +@reporter.step("Include node {node_to_include} into network map") def include_node_to_network_map( node_to_include: StorageNode, alive_node: StorageNode, @@ -178,37 +171,29 @@ def include_node_to_network_map( check_node_in_map(node_to_include, shell, alive_node) -@reporter.step_deco("Check node {node} in network map") -def check_node_in_map( - node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None -) -> None: +@reporter.step("Check node {node} in network map") +def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: alive_node = alive_node or node node_netmap_key = node.get_wallet_public_key() logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}") snapshot = get_netmap_snapshot(alive_node, shell) - assert ( - node_netmap_key in snapshot - ), f"Expected node with key {node_netmap_key} to be in network map" + assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map" -@reporter.step_deco("Check node {node} NOT in network map") -def check_node_not_in_map( - node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None -) -> None: +@reporter.step("Check node {node} NOT in network map") +def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: alive_node = alive_node or node node_netmap_key = node.get_wallet_public_key() logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}") snapshot = get_netmap_snapshot(alive_node, shell) - assert ( - node_netmap_key not in snapshot - ), f"Expected node with key {node_netmap_key} to be NOT in network map" + assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be NOT in network map" -@reporter.step_deco("Wait for node {node} is ready") +@reporter.step("Wait for node {node} to be ready") def wait_for_node_to_be_ready(node: StorageNode) -> None: timeout, attempts = 30, 6 for _ in range(attempts): @@ -219,12 +204,10 @@ def wait_for_node_to_be_ready(node: StorageNode) -> None: except Exception as err: logger.warning(f"Node {node} is not ready:\n{err}") sleep(timeout) - raise AssertionError( - f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds" - ) + raise AssertionError(f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds") -@reporter.step_deco("Remove nodes from network map trough 
cli-adm morph command") +@reporter.step("Remove nodes from network map trough cli-adm morph command") def remove_nodes_from_map_morph( shell: Shell, cluster: Cluster, diff --git a/src/frostfs_testlib/steps/payment_neogo.py b/src/frostfs_testlib/steps/payment_neogo.py index 7fe0b4d..8e78cca 100644 --- a/src/frostfs_testlib/steps/payment_neogo.py +++ b/src/frostfs_testlib/steps/payment_neogo.py @@ -8,21 +8,21 @@ from typing import Optional from neo3.wallet import utils as neo3_utils from neo3.wallet import wallet as neo3_wallet +from frostfs_testlib import reporter from frostfs_testlib.cli import NeoGo -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils -reporter = get_reporter() logger = logging.getLogger("NeoLogger") EMPTY_PASSWORD = "" TX_PERSIST_TIMEOUT = 15 # seconds ASSET_POWER_SIDECHAIN = 10**12 + def get_nns_contract_hash(morph_chain: MorphChain) -> str: return morph_chain.rpc_client.get_contract_state(1)["hash"] @@ -39,6 +39,7 @@ def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell) stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"] return bytes.decode(base64.b64decode(stack_data[0]["value"])) + def transaction_accepted(morph_chain: MorphChain, tx_id: str): """ This function returns True in case of accepted TX. @@ -62,7 +63,7 @@ def transaction_accepted(morph_chain: MorphChain, tx_id: str): return False -@reporter.step_deco("Get FrostFS Balance") +@reporter.step("Get FrostFS Balance") def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""): """ This function returns FrostFS balance for given wallet. 
@@ -82,7 +83,8 @@ def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_ logger.error(f"failed to get wallet balance: {out}") raise out -@reporter.step_deco("Transfer Gas") + +@reporter.step("Transfer Gas") def transfer_gas( shell: Shell, amount: int, @@ -111,16 +113,10 @@ def transfer_gas( """ wallet_from_path = wallet_from_path or morph_chain.get_wallet_path() wallet_from_password = ( - wallet_from_password - if wallet_from_password is not None - else morph_chain.get_wallet_password() - ) - address_from = address_from or wallet_utils.get_last_address_from_wallet( - wallet_from_path, wallet_from_password - ) - address_to = address_to or wallet_utils.get_last_address_from_wallet( - wallet_to_path, wallet_to_password + wallet_from_password if wallet_from_password is not None else morph_chain.get_wallet_password() ) + address_from = address_from or wallet_utils.get_last_address_from_wallet(wallet_from_path, wallet_from_password) + address_to = address_to or wallet_utils.get_last_address_from_wallet(wallet_to_path, wallet_to_password) neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) out = neogo.nep17.transfer( @@ -141,7 +137,7 @@ def transfer_gas( time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) -@reporter.step_deco("Get Sidechain Balance") +@reporter.step("Get Sidechain Balance") def get_sidechain_balance(morph_chain: MorphChain, address: str): resp = morph_chain.rpc_client.get_nep17_balances(address=address) logger.info(f"Got getnep17balances response: {resp}") diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index d746337..1d7adfa 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -8,27 +8,23 @@ from typing import Optional from dateutil.parser import parse +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAuthmate -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus from frostfs_testlib.shell import CommandOptions, InteractiveInput, Shell from frostfs_testlib.shell.interfaces import SshCredentials -from frostfs_testlib.steps.cli.container import ( - search_container_by_name, - search_nodes_with_container, -) +from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.utils.cli_utils import _run_with_passwd -reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Expected all objects are presented in the bucket") +@reporter.step("Expect all objects to be present in the bucket") def check_objects_in_bucket( s3_client: S3ClientWrapper, bucket: str, @@ -37,13 +33,9 @@ def check_objects_in_bucket( ) -> None: unexpected_objects = unexpected_objects or [] bucket_objects = s3_client.list_objects(bucket) - assert len(bucket_objects) == len( - expected_objects - ), f"Expected {len(expected_objects)} objects in the bucket" + assert len(bucket_objects) == len(expected_objects), f"Expected {len(expected_objects)} objects in the bucket" for bucket_object in expected_objects: - assert ( - bucket_object in bucket_objects - ), f"Expected object 
{bucket_object} in objects list {bucket_objects}" + assert bucket_object in bucket_objects, f"Expected object {bucket_object} in objects list {bucket_objects}" for bucket_object in unexpected_objects: assert ( @@ -51,21 +43,17 @@ def check_objects_in_bucket( ), f"Expected object {bucket_object} not in objects list {bucket_objects}" -@reporter.step_deco("Try to get object and got error") -def try_to_get_objects_and_expect_error( - s3_client: S3ClientWrapper, bucket: str, object_keys: list -) -> None: +@reporter.step("Try to get object and got error") +def try_to_get_objects_and_expect_error(s3_client: S3ClientWrapper, bucket: str, object_keys: list) -> None: for obj in object_keys: try: s3_client.get_object(bucket, obj) raise AssertionError(f"Object {obj} found in bucket {bucket}") except Exception as err: - assert "The specified key does not exist" in str( - err - ), f"Expected error in exception {err}" + assert "The specified key does not exist" in str(err), f"Expected error in exception {err}" -@reporter.step_deco("Set versioning status to '{status}' for bucket '{bucket}'") +@reporter.step("Set versioning status to '{status}' for bucket '{bucket}'") def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus): if status == VersioningStatus.UNDEFINED: return @@ -83,12 +71,8 @@ def object_key_from_file_path(full_path: str) -> str: def assert_tags( actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None ) -> None: - expected_tags = ( - [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else [] - ) - unexpected_tags = ( - [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else [] - ) + expected_tags = [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else [] + unexpected_tags = [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else [] if expected_tags == []: assert not actual_tags, f"Expected there is no tags, got {actual_tags}" assert len(expected_tags) == len(actual_tags) @@ -98,7 +82,7 @@ def assert_tags( assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}" -@reporter.step_deco("Expected all tags are presented in object") +@reporter.step("Expected all tags are presented in object") def check_tags_by_object( s3_client: S3ClientWrapper, bucket: str, @@ -107,12 +91,10 @@ def check_tags_by_object( unexpected_tags: Optional[list] = None, ) -> None: actual_tags = s3_client.get_object_tagging(bucket, key) - assert_tags( - expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags - ) + assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags) -@reporter.step_deco("Expected all tags are presented in bucket") +@reporter.step("Expected all tags are presented in bucket") def check_tags_by_bucket( s3_client: S3ClientWrapper, bucket: str, @@ -120,9 +102,7 @@ def check_tags_by_bucket( unexpected_tags: Optional[list] = None, ) -> None: actual_tags = s3_client.get_bucket_tagging(bucket) - assert_tags( - expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags - ) + assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags) def assert_object_lock_mode( @@ -135,25 +115,19 @@ def assert_object_lock_mode( retain_period: Optional[int] = None, ): object_dict = s3_client.get_object(bucket, file_name, full_output=True) - assert ( - 
object_dict.get("ObjectLockMode") == object_lock_mode - ), f"Expected Object Lock Mode is {object_lock_mode}" + assert object_dict.get("ObjectLockMode") == object_lock_mode, f"Expected Object Lock Mode is {object_lock_mode}" assert ( object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status ), f"Expected Object Lock Legal Hold Status is {legal_hold_status}" object_retain_date = object_dict.get("ObjectLockRetainUntilDate") - retain_date = ( - parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date - ) + retain_date = parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date if retain_until_date: assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_until_date.strftime( "%Y-%m-%dT%H:%M:%S" ), f'Expected Object Lock Retain Until Date is {str(retain_until_date.strftime("%Y-%m-%dT%H:%M:%S"))}' elif retain_period: last_modify_date = object_dict.get("LastModified") - last_modify = ( - parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date - ) + last_modify = parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date assert ( retain_date - last_modify + timedelta(seconds=1) ).days == retain_period, f"Expected retention period is {retain_period} days" @@ -187,7 +161,7 @@ def assert_s3_acl(acl_grants: list, permitted_users: str): logger.error("FULL_CONTROL is given to All Users") -@reporter.step_deco("Init S3 Credentials") +@reporter.step("Init S3 Credentials") def init_s3_credentials( wallet: WalletInfo, shell: Shell, @@ -213,24 +187,18 @@ def init_s3_credentials( container_placement_policy=container_placement_policy, ).stdout aws_access_key_id = str( - re.search(r"access_key_id.*:\s.(?P\w*)", issue_secret_output).group( - "aws_access_key_id" - ) + re.search(r"access_key_id.*:\s.(?P\w*)", issue_secret_output).group("aws_access_key_id") ) aws_secret_access_key = str( - re.search( - r"secret_access_key.*:\s.(?P\w*)", issue_secret_output - ).group("aws_secret_access_key") - ) - cid = str( - re.search(r"container_id.*:\s.(?P\w*)", issue_secret_output).group( - "container_id" + re.search(r"secret_access_key.*:\s.(?P\w*)", issue_secret_output).group( + "aws_secret_access_key" ) ) + cid = str(re.search(r"container_id.*:\s.(?P\w*)", issue_secret_output).group("container_id")) return cid, aws_access_key_id, aws_secret_access_key -@reporter.step_deco("Delete bucket with all objects") +@reporter.step("Delete bucket with all objects") def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): versioning_status = s3_client.get_bucket_versioning_status(bucket) if versioning_status == VersioningStatus.ENABLED.value: @@ -255,7 +223,7 @@ def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): s3_client.delete_bucket(bucket) -@reporter.step_deco("Search nodes bucket") +@reporter.step("Search nodes bucket") def search_nodes_with_bucket( cluster: Cluster, bucket_name: str, @@ -264,7 +232,5 @@ def search_nodes_with_bucket( endpoint: str, ) -> list[ClusterNode]: cid = search_container_by_name(wallet=wallet, name=bucket_name, shell=shell, endpoint=endpoint) - nodes_list = search_nodes_with_container( - wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster - ) + nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) return nodes_list diff --git a/src/frostfs_testlib/steps/session_token.py b/src/frostfs_testlib/steps/session_token.py index b82d0e2..6c87cac 100644 --- 
a/src/frostfs_testlib/steps/session_token.py +++ b/src/frostfs_testlib/steps/session_token.py @@ -7,8 +7,8 @@ from dataclasses import dataclass from enum import Enum from typing import Any, Optional +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell @@ -17,7 +17,6 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.readable import HumanReadableEnum from frostfs_testlib.utils import json_utils, wallet_utils -reporter = get_reporter() logger = logging.getLogger("NeoLogger") UNRELATED_KEY = "unrelated key in the session" @@ -50,7 +49,7 @@ class Lifetime: iat: int = 0 -@reporter.step_deco("Generate Session Token") +@reporter.step("Generate Session Token") def generate_session_token( owner_wallet: WalletInfo, session_wallet: WalletInfo, @@ -72,9 +71,7 @@ def generate_session_token( file_path = os.path.join(tokens_dir, str(uuid.uuid4())) - pub_key_64 = wallet_utils.get_wallet_public_key( - session_wallet.path, session_wallet.password, "base64" - ) + pub_key_64 = wallet_utils.get_wallet_public_key(session_wallet.path, session_wallet.password, "base64") lifetime = lifetime or Lifetime() @@ -99,7 +96,7 @@ def generate_session_token( return file_path -@reporter.step_deco("Generate Session Token For Container") +@reporter.step("Generate Session Token For Container") def generate_container_session_token( owner_wallet: WalletInfo, session_wallet: WalletInfo, @@ -126,11 +123,7 @@ def generate_container_session_token( "container": { "verb": verb.value, "wildcard": cid is None, - **( - {"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}} - if cid is not None - else {} - ), + **({"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}} if cid is not None else {}), }, } @@ -143,7 +136,7 @@ def generate_container_session_token( ) -@reporter.step_deco("Generate Session Token For Object") +@reporter.step("Generate Session Token For Object") def generate_object_session_token( owner_wallet: WalletInfo, session_wallet: WalletInfo, @@ -185,7 +178,7 @@ def generate_object_session_token( ) -@reporter.step_deco("Get signed token for container session") +@reporter.step("Get signed token for container session") def get_container_signed_token( owner_wallet: WalletInfo, user_wallet: WalletInfo, @@ -207,7 +200,7 @@ def get_container_signed_token( return sign_session_token(shell, session_token_file, owner_wallet) -@reporter.step_deco("Get signed token for object session") +@reporter.step("Get signed token for object session") def get_object_signed_token( owner_wallet: WalletInfo, user_wallet: WalletInfo, @@ -234,7 +227,7 @@ def get_object_signed_token( return sign_session_token(shell, session_token_file, owner_wallet) -@reporter.step_deco("Create Session Token") +@reporter.step("Create Session Token") def create_session_token( shell: Shell, owner: str, @@ -265,7 +258,7 @@ def create_session_token( return session_token -@reporter.step_deco("Sign Session Token") +@reporter.step("Sign Session Token") def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) -> str: """ This function signs the session token by the given wallet. @@ -279,10 +272,6 @@ def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) - The path to the signed token. 
""" signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - frostfscli = FrostfsCli( - shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG - ) - frostfscli.util.sign_session_token( - wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file - ) + frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG) + frostfscli.util.sign_session_token(wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file) return signed_token_file diff --git a/src/frostfs_testlib/steps/storage_object.py b/src/frostfs_testlib/steps/storage_object.py index 7776754..ce1bb94 100644 --- a/src/frostfs_testlib/steps/storage_object.py +++ b/src/frostfs_testlib/steps/storage_object.py @@ -3,7 +3,7 @@ from time import sleep import pytest -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import delete_object, get_object @@ -12,16 +12,13 @@ from frostfs_testlib.steps.tombstone import verify_head_tombstone from frostfs_testlib.storage.cluster import Cluster from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo -reporter = get_reporter() logger = logging.getLogger("NeoLogger") CLEANUP_TIMEOUT = 10 -@reporter.step_deco("Delete Objects") -def delete_objects( - storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster -) -> None: +@reporter.step("Delete Objects") +def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster) -> None: """ Deletes given storage objects. diff --git a/src/frostfs_testlib/steps/storage_policy.py b/src/frostfs_testlib/steps/storage_policy.py index eca25d2..d2202a4 100644 --- a/src/frostfs_testlib/steps/storage_policy.py +++ b/src/frostfs_testlib/steps/storage_policy.py @@ -6,7 +6,7 @@ """ import logging -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import head_object @@ -14,14 +14,11 @@ from frostfs_testlib.steps.complex_object_actions import get_last_object from frostfs_testlib.storage.cluster import StorageNode from frostfs_testlib.utils import string_utils -reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Get Object Copies") -def get_object_copies( - complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] -) -> int: +@reporter.step("Get Object Copies") +def get_object_copies(complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: """ The function performs requests to all nodes of the container and finds out if they store a copy of the object. The procedure is @@ -45,10 +42,8 @@ def get_object_copies( ) -@reporter.step_deco("Get Simple Object Copies") -def get_simple_object_copies( - wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] -) -> int: +@reporter.step("Get Simple Object Copies") +def get_simple_object_copies(wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: """ To figure out the number of a simple object copies, only direct HEAD requests should be made to the every node of the container. 
@@ -66,9 +61,7 @@ def get_simple_object_copies( copies = 0 for node in nodes: try: - response = head_object( - wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True - ) + response = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True) if response: logger.info(f"Found object {oid} on node {node}") copies += 1 @@ -78,10 +71,8 @@ def get_simple_object_copies( return copies -@reporter.step_deco("Get Complex Object Copies") -def get_complex_object_copies( - wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] -) -> int: +@reporter.step("Get Complex Object Copies") +def get_complex_object_copies(wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: """ To figure out the number of a complex object copies, we firstly need to retrieve its Last object. We consider that the number of @@ -102,10 +93,8 @@ def get_complex_object_copies( return get_simple_object_copies(wallet, cid, last_oid, shell, nodes) -@reporter.step_deco("Get Nodes With Object") -def get_nodes_with_object( - cid: str, oid: str, shell: Shell, nodes: list[StorageNode] -) -> list[StorageNode]: +@reporter.step("Get Nodes With Object") +def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]: """ The function returns list of nodes which store the given object. @@ -141,7 +130,7 @@ def get_nodes_with_object( return nodes_list -@reporter.step_deco("Get Nodes Without Object") +@reporter.step("Get Nodes Without Object") def get_nodes_without_object( wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] ) -> list[StorageNode]: @@ -160,9 +149,7 @@ def get_nodes_without_object( nodes_list = [] for node in nodes: try: - res = head_object( - wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True - ) + res = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True) if res is None: nodes_list.append(node) except Exception as err: diff --git a/src/frostfs_testlib/steps/tombstone.py b/src/frostfs_testlib/steps/tombstone.py index a46cf77..b468c93 100644 --- a/src/frostfs_testlib/steps/tombstone.py +++ b/src/frostfs_testlib/steps/tombstone.py @@ -3,18 +3,15 @@ import logging from neo3.wallet import wallet -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import head_object -reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Verify Head Tombstone") -def verify_head_tombstone( - wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str -): +@reporter.step("Verify Head Tombstone") +def verify_head_tombstone(wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str): header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"] s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"] @@ -30,12 +27,6 @@ def verify_head_tombstone( assert header["ownerID"] == addr, "Tombstone Owner ID is wrong" assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone" - assert ( - header["sessionToken"]["body"]["object"]["verb"] == "DELETE" - ), "Header Session Type isn't DELETE" - assert ( - header["sessionToken"]["body"]["object"]["target"]["container"] == cid - ), "Header Session ID is wrong" - assert ( - oid in header["sessionToken"]["body"]["object"]["target"]["objects"] - ), "Header 
Session OID is wrong" + assert header["sessionToken"]["body"]["object"]["verb"] == "DELETE", "Header Session Type isn't DELETE" + assert header["sessionToken"]["body"]["object"]["target"]["container"] == cid, "Header Session ID is wrong" + assert oid in header["sessionToken"]["body"]["object"]["target"]["objects"], "Header Session OID is wrong" diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 02601ac..313215a 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -4,9 +4,9 @@ import re import yaml from yarl import URL +from frostfs_testlib import reporter from frostfs_testlib.hosting import Host, Hosting from frostfs_testlib.hosting.config import ServiceConfig -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.storage import get_service_registry from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration @@ -16,8 +16,6 @@ from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.service_registry import ServiceRegistry -reporter = get_reporter() - class ClusterNode: """ diff --git a/src/frostfs_testlib/storage/configuration/service_configuration.py b/src/frostfs_testlib/storage/configuration/service_configuration.py index 1aa7846..f7b3be7 100644 --- a/src/frostfs_testlib/storage/configuration/service_configuration.py +++ b/src/frostfs_testlib/storage/configuration/service_configuration.py @@ -4,13 +4,11 @@ from typing import Any import yaml -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.shell.interfaces import CommandOptions from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml from frostfs_testlib.storage.dataclasses.node_base import ServiceClass -reporter = get_reporter() - class ServiceConfiguration(ServiceConfigurationYml): def __init__(self, service: "ServiceClass") -> None: diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index 8ecada8..003bb6b 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -2,18 +2,16 @@ import copy from typing import Optional import frostfs_testlib.resources.optionals as optionals +from frostfs_testlib import reporter from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType from frostfs_testlib.load.load_report import LoadReport from frostfs_testlib.load.load_verifiers import LoadVerifier -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.test_control import run_optionally -reporter = get_reporter() - class BackgroundLoadController: k6_dir: str @@ -86,7 +84,7 @@ class BackgroundLoadController: return all_endpoints[load_type][endpoint_selection_strategy] @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Prepare 
load instances") + @reporter.step("Prepare load instances") def prepare(self): self.endpoints = self._get_endpoints(self.load_params.load_type, self.load_params.endpoint_selection_strategy) self.runner.prepare(self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir) @@ -99,7 +97,7 @@ class BackgroundLoadController: self.started = True @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Stop load") + @reporter.step("Stop load") def stop(self): self.runner.stop() @@ -108,7 +106,7 @@ class BackgroundLoadController: return self.runner.is_running @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Reset load") + @reporter.step("Reset load") def _reset_for_consequent_load(self): """This method is required if we want to run multiple loads during test run. Raise load counter by 1 and append it to load_id @@ -118,7 +116,7 @@ class BackgroundLoadController: self.load_params.set_id(f"{self.load_params.load_id}_{self.load_counter}") @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Startup load") + @reporter.step("Startup load") def startup(self): self.prepare() self.preset() @@ -129,7 +127,7 @@ class BackgroundLoadController: self.runner.preset() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Stop and get results of load") + @reporter.step("Stop and get results of load") def teardown(self, load_report: Optional[LoadReport] = None): if not self.started: return @@ -141,7 +139,7 @@ class BackgroundLoadController: load_report.add_summaries(self.load_summaries) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Run post-load verification") + @reporter.step("Run post-load verification") def verify(self): try: load_issues = self._collect_load_issues() @@ -153,7 +151,7 @@ class BackgroundLoadController: self._reset_for_consequent_load() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Collect load issues") + @reporter.step("Collect load issues") def _collect_load_issues(self): verifier = LoadVerifier(self.load_params) return verifier.collect_load_issues(self.load_summaries) @@ -163,7 +161,7 @@ class BackgroundLoadController: self.runner.wait_until_finish(soft_timeout) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Verify loaded objects") + @reporter.step("Verify loaded objects") def _run_verify_scenario(self) -> list[str]: self.verification_params = LoadParams( verify_clients=self.load_params.verify_clients, diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 7020671..35ab6c1 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -4,12 +4,12 @@ import time from typing import TypeVar import frostfs_testlib.resources.optionals as optionals +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli from frostfs_testlib.cli.netmap_parser import NetmapParser from frostfs_testlib.healthcheck.interfaces import Healthcheck from frostfs_testlib.hosting.interfaces import HostStatus from frostfs_testlib.plugins import load_all -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC from frostfs_testlib.resources.common import 
DEFAULT_WALLET_CONFIG, MORPH_BLOCK_TIME from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider @@ -21,7 +21,6 @@ from frostfs_testlib.testing import parallel from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success from frostfs_testlib.utils.datetime_utils import parse_time -reporter = get_reporter() logger = logging.getLogger("NeoLogger") if_up_down_helper = IfUpDownHelper() @@ -76,7 +75,7 @@ class ClusterStateController: return online_svc @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop host of node {node}") + @reporter.step("Stop host of node {node}") def stop_node_host(self, node: ClusterNode, mode: str): # Drop ssh connection for this node before shutdown provider = SshConnectionProvider() @@ -88,7 +87,7 @@ class ClusterStateController: self._wait_for_host_offline(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Shutdown whole cluster") + @reporter.step("Shutdown whole cluster") def shutdown_cluster(self, mode: str, reversed_order: bool = False): nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes @@ -105,7 +104,7 @@ class ClusterStateController: self._wait_for_host_offline(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start host of node {node}") + @reporter.step("Start host of node {node}") def start_node_host(self, node: ClusterNode, startup_healthcheck: bool = True): with reporter.step(f"Start host {node.host.config.address}"): node.host.start_host() @@ -115,7 +114,7 @@ class ClusterStateController: self.wait_startup_healthcheck() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start stopped hosts") + @reporter.step("Start stopped hosts") def start_stopped_hosts(self, reversed_order: bool = False): if not self.stopped_nodes: return @@ -133,35 +132,35 @@ class ClusterStateController: self.wait_after_storage_startup() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Detach disk {device} at {mountpoint} on node {node}") + @reporter.step("Detach disk {device} at {mountpoint} on node {node}") def detach_disk(self, node: StorageNode, device: str, mountpoint: str): disk_controller = self._get_disk_controller(node, device, mountpoint) self.detached_disks[disk_controller.id] = disk_controller disk_controller.detach() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Attach disk {device} at {mountpoint} on node {node}") + @reporter.step("Attach disk {device} at {mountpoint} on node {node}") def attach_disk(self, node: StorageNode, device: str, mountpoint: str): disk_controller = self._get_disk_controller(node, device, mountpoint) disk_controller.attach() self.detached_disks.pop(disk_controller.id, None) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Restore detached disks") + @reporter.step("Restore detached disks") def restore_disks(self): for disk_controller in self.detached_disks.values(): disk_controller.attach() self.detached_disks = {} @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop all {service_type} services") + @reporter.step("Stop all {service_type} services") def stop_services_of_type(self, service_type: type[ServiceClass], mask: bool = True): services = self.cluster.services(service_type) self.stopped_services.update(services) parallel([service.stop_service for service in services], mask=mask) 
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start all {service_type} services") + @reporter.step("Start all {service_type} services") def start_services_of_type(self, service_type: type[ServiceClass]): services = self.cluster.services(service_type) parallel([service.start_service for service in services]) @@ -176,24 +175,24 @@ class ClusterStateController: result = s3gate.get_metric("frostfs_s3_gw_pool_current_nodes") assert 'address="127.0.0.1' in result.stdout, "S3Gate should connect to local storage node" - @reporter.step_deco("Wait for S3Gates reconnection to local storage") + @reporter.step("Wait for S3Gates reconnection to local storage") def wait_s3gates(self): online_s3gates = self._get_online(S3Gate) if online_s3gates: parallel(self.wait_s3gate, online_s3gates) - @reporter.step_deco("Wait for cluster startup healtcheck") + @reporter.step("Wait for cluster startup healthcheck") def wait_startup_healthcheck(self): nodes = self.cluster.nodes(self._get_online(StorageNode)) parallel(self.healthcheck.startup_healthcheck, nodes) - @reporter.step_deco("Wait for storage reconnection to the system") + @reporter.step("Wait for storage reconnection to the system") def wait_after_storage_startup(self): self.wait_startup_healthcheck() self.wait_s3gates() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start all stopped services") + @reporter.step("Start all stopped services") def start_all_stopped_services(self): stopped_storages = self._get_stopped_by_type(StorageNode) parallel([service.start_service for service in self.stopped_services]) @@ -203,21 +202,21 @@ class ClusterStateController: - @reporter.step_deco("Stop {service_type} service on {node}") + @reporter.step("Stop {service_type} service on {node}") def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True): service = node.service(service_type) service.stop_service(mask) self.stopped_services.add(service) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start {service_type} service on {node}") + @reporter.step("Start {service_type} service on {node}") def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]): service = node.service(service_type) service.start_service() self.stopped_services.discard(service) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start all stopped {service_type} services") + @reporter.step("Start all stopped {service_type} services") def start_stopped_services_of_type(self, service_type: type[ServiceClass]): stopped_svc = self._get_stopped_by_type(service_type) if not stopped_svc: @@ -231,7 +230,7 @@ class ClusterStateController: # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop all storage services on cluster") + @reporter.step("Stop all storage services on cluster") def stop_all_storage_services(self, reversed_order: bool = False): nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes @@ -240,7 +239,7 @@ class ClusterStateController: # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop all S3 gates on cluster") + @reporter.step("Stop all S3 gates on cluster") def stop_all_s3_gates(self, reversed_order: bool = False): nodes = reversed(self.cluster.cluster_nodes) if reversed_order else 
self.cluster.cluster_nodes @@ -249,42 +248,42 @@ class ClusterStateController: # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop storage service on {node}") + @reporter.step("Stop storage service on {node}") def stop_storage_service(self, node: ClusterNode, mask: bool = True): self.stop_service_of_type(node, StorageNode, mask) # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start storage service on {node}") + @reporter.step("Start storage service on {node}") def start_storage_service(self, node: ClusterNode): self.start_service_of_type(node, StorageNode) # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start stopped storage services") + @reporter.step("Start stopped storage services") def start_stopped_storage_services(self): self.start_stopped_services_of_type(StorageNode) # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop s3 gate on {node}") + @reporter.step("Stop s3 gate on {node}") def stop_s3_gate(self, node: ClusterNode, mask: bool = True): self.stop_service_of_type(node, S3Gate, mask) # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start s3 gate on {node}") + @reporter.step("Start s3 gate on {node}") def start_s3_gate(self, node: ClusterNode): self.start_service_of_type(node, S3Gate) # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start stopped S3 gates") + @reporter.step("Start stopped S3 gates") def start_stopped_s3_gates(self): self.start_stopped_services_of_type(S3Gate) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Suspend {process_name} service in {node}") + @reporter.step("Suspend {process_name} service in {node}") def suspend_service(self, process_name: str, node: ClusterNode): node.host.wait_success_suspend_process(process_name) if self.suspended_services.get(process_name): @@ -293,20 +292,20 @@ class ClusterStateController: self.suspended_services[process_name] = [node] @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Resume {process_name} service in {node}") + @reporter.step("Resume {process_name} service in {node}") def resume_service(self, process_name: str, node: ClusterNode): node.host.wait_success_resume_process(process_name) if self.suspended_services.get(process_name) and node in self.suspended_services[process_name]: self.suspended_services[process_name].remove(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start suspend processes services") + @reporter.step("Resume suspended services") def resume_suspended_services(self): for process_name, list_nodes in self.suspended_services.items(): [node.host.wait_success_resume_process(process_name) for node in list_nodes] self.suspended_services = {} - @reporter.step_deco("Drop traffic to {node}, with ports - {ports}, nodes - {block_nodes}") + @reporter.step("Drop traffic to {node}, with ports - {ports}, nodes - {block_nodes}") def drop_traffic( self, mode: str, @@ -327,7 +326,7 @@ class ClusterStateController: time.sleep(wakeup_timeout) self.dropped_traffic.append(node) - @reporter.step_deco("Ping traffic") + @reporter.step("Ping traffic") def ping_traffic( self, node: ClusterNode, @@ -343,7 +342,7 @@ class ClusterStateController: return False return True - @reporter.step_deco("Start traffic to {node}") + @reporter.step("Start traffic to {node}") 
def restore_traffic( self, mode: str, @@ -358,12 +357,12 @@ class ClusterStateController: case "nodes": IpTablesHelper.restore_input_traffic_to_node(node=node) - @reporter.step_deco("Restore blocked nodes") + @reporter.step("Restore blocked nodes") def restore_all_traffic(self): parallel(self._restore_traffic_to_node, self.dropped_traffic) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Hard reboot host {node} via magic SysRq option") + @reporter.step("Hard reboot host {node} via magic SysRq option") def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True, startup_healthcheck: bool = True): shell = node.host.get_shell() shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"') @@ -383,14 +382,14 @@ class ClusterStateController: if startup_healthcheck: self.wait_startup_healthcheck() - @reporter.step_deco("Down {interface} to {nodes}") + @reporter.step("Down {interface} on {nodes}") def down_interface(self, nodes: list[ClusterNode], interface: str): for node in nodes: if_up_down_helper.down_interface(node=node, interface=interface) assert if_up_down_helper.check_state(node=node, interface=interface) == "DOWN" self.nodes_with_modified_interface.append(node) - @reporter.step_deco("Up {interface} to {nodes}") + @reporter.step("Up {interface} on {nodes}") def up_interface(self, nodes: list[ClusterNode], interface: str): for node in nodes: if_up_down_helper.up_interface(node=node, interface=interface) @@ -398,17 +397,17 @@ class ClusterStateController: if node in self.nodes_with_modified_interface: self.nodes_with_modified_interface.remove(node) - @reporter.step_deco("Restore interface") + @reporter.step("Restore interfaces") def restore_interfaces(self): for node in self.nodes_with_modified_interface: if_up_down_helper.up_all_interface(node) - @reporter.step_deco("Get node time") + @reporter.step("Get node time") def get_node_date(self, node: ClusterNode) -> datetime: shell = node.host.get_shell() return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z") - @reporter.step_deco("Set node time to {in_date}") + @reporter.step("Set node time to {in_date}") def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: shell = node.host.get_shell() shell.exec(f"date -s @{time.mktime(in_date.timetuple())}") @@ -417,7 +416,7 @@ class ClusterStateController: with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1) - @reporter.step_deco(f"Restore time") + @reporter.step("Restore time") def restore_node_date(self, node: ClusterNode) -> None: shell = node.host.get_shell() now_time = datetime.datetime.now(datetime.timezone.utc) @@ -425,14 +424,14 @@ class ClusterStateController: shell.exec(f"date -s @{time.mktime(now_time.timetuple())}") shell.exec("hwclock --systohc") - @reporter.step_deco("Change the synchronizer status to {status}") + @reporter.step("Change the synchronizer status to {status}") def set_sync_date_all_nodes(self, status: str): if status == "active": parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes) return parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes) - @reporter.step_deco("Set MaintenanceModeAllowed - {status}") + @reporter.step("Set MaintenanceModeAllowed - {status}") def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: frostfs_adm = FrostfsAdm( shell=cluster_node.host.get_shell(), @@ -441,7 
+440,7 @@ class ClusterStateController: ) frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}") - @reporter.step_deco("Set mode node to {status}") + @reporter.step("Set mode node to {status}") def set_mode_node(self, cluster_node: ClusterNode, wallet: str, status: str, await_tick: bool = True) -> None: rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint() control_endpoint = cluster_node.service(StorageNode).get_control_endpoint() @@ -465,8 +464,7 @@ class ClusterStateController: self.check_node_status(status=status, wallet=wallet, cluster_node=cluster_node) - @wait_for_success(80, 8) - @reporter.step_deco("Check status node, status - {status}") + @wait_for_success(80, 8, title="Wait for storage status become {status}") def check_node_status(self, status: str, wallet: str, cluster_node: ClusterNode): frostfs_cli = FrostfsCli( shell=self.shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG @@ -537,13 +535,13 @@ class ClusterStateController: interfaces.append(ip) return interfaces - @reporter.step_deco("Ping node") + @reporter.step("Ping node") def _ping_host(self, node: ClusterNode): options = CommandOptions(check=False) return self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.ONLINE) - @reporter.step_deco("Waiting for {node} to go online") + @reporter.step("Waiting for {node} to go online") def _wait_for_host_online(self, node: ClusterNode): try: ping_result = self._ping_host(node) @@ -555,7 +553,7 @@ class ClusterStateController: return HostStatus.OFFLINE @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.OFFLINE) - @reporter.step_deco("Waiting for {node} to go offline") + @reporter.step("Waiting for {node} to go offline") def _wait_for_host_offline(self, node: ClusterNode): try: ping_result = self._ping_host(node) diff --git a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py index 078d483..66f72d6 100644 --- a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py +++ b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py @@ -1,13 +1,11 @@ from typing import Any -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController, StateManager from frostfs_testlib.storage.dataclasses.node_base import ServiceClass from frostfs_testlib.testing import parallel -reporter = get_reporter() - class ConfigStateManager(StateManager): def __init__(self, cluster_state_controller: ClusterStateController) -> None: @@ -15,7 +13,7 @@ class ConfigStateManager(StateManager): self.services_with_changed_config: set[tuple[ClusterNode, ServiceClass]] = set() self.cluster = self.csc.cluster - @reporter.step_deco("Change configuration for {service_type} on all nodes") + @reporter.step("Change configuration for {service_type} on all nodes") def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any]): services = self.cluster.services(service_type) nodes = self.cluster.nodes(services) @@ -25,7 +23,7 @@ class ConfigStateManager(StateManager): parallel([node.config(service_type).set for node in nodes], values=values) self.csc.start_services_of_type(service_type) - @reporter.step_deco("Change 
configuration for {service_type} on {node}") + @reporter.step("Change configuration for {service_type} on {node}") def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]): self.services_with_changed_config.add((node, service_type)) @@ -33,7 +31,7 @@ class ConfigStateManager(StateManager): node.config(service_type).set(values) self.csc.start_service_of_type(node, service_type) - @reporter.step_deco("Revert all configuration changes") + @reporter.step("Revert all configuration changes") def revert_all(self): if not self.services_with_changed_config: return @@ -44,7 +42,7 @@ class ConfigStateManager(StateManager): self.csc.start_all_stopped_services() # TODO: parallel can't have multiple parallel_items :( - @reporter.step_deco("Revert all configuration {node_and_service}") + @reporter.step("Revert all configuration {node_and_service}") def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]): node, service_type = node_and_service self.csc.stop_service_of_type(node, service_type) diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 4b9ffc2..ace0214 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -4,16 +4,14 @@ from typing import Optional, TypedDict, TypeVar import yaml +from frostfs_testlib import reporter from frostfs_testlib.hosting.config import ServiceConfig from frostfs_testlib.hosting.interfaces import Host -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell.interfaces import CommandResult from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.testing.readable import HumanReadableABC from frostfs_testlib.utils import wallet_utils -reporter = get_reporter() - @dataclass class NodeBase(HumanReadableABC): diff --git a/src/frostfs_testlib/testing/cluster_test_base.py b/src/frostfs_testlib/testing/cluster_test_base.py index 0676813..49c6afd 100644 --- a/src/frostfs_testlib/testing/cluster_test_base.py +++ b/src/frostfs_testlib/testing/cluster_test_base.py @@ -1,7 +1,7 @@ import time from typing import Optional -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.steps import epoch @@ -9,15 +9,13 @@ from frostfs_testlib.storage.cluster import Cluster from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode from frostfs_testlib.utils import datetime_utils -reporter = get_reporter() - # To skip adding every mandatory singleton dependency to EACH test function class ClusterTestBase: shell: Shell cluster: Cluster - @reporter.step_deco("Tick {epochs_to_tick} epochs, wait {wait_block} block") + @reporter.step("Tick {epochs_to_tick} epochs, wait {wait_block} block") def tick_epochs( self, epochs_to_tick: int, diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index e1dfcd1..41d52ab 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -19,10 +19,9 @@ from typing import Dict, List, TypedDict, Union import pexpect -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetmapInfo -reporter = get_reporter() logger = logging.getLogger("NeoLogger") COLOR_GREEN = "\033[92m" 
COLOR_OFF = "\033[0m" @@ -65,9 +64,7 @@ def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str = return cmd.decode() -def _attach_allure_log( - cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime -) -> None: +def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime) -> None: command_attachment = ( f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n" diff --git a/src/frostfs_testlib/utils/env_utils.py b/src/frostfs_testlib/utils/env_utils.py index 6b4fb40..3fdebe1 100644 --- a/src/frostfs_testlib/utils/env_utils.py +++ b/src/frostfs_testlib/utils/env_utils.py @@ -1,13 +1,12 @@ import logging import re -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter -reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Read environment.properties") +@reporter.step("Read environment.properties") def read_env_properties(file_path: str) -> dict: with open(file_path, "r") as file: raw_content = file.read() @@ -23,7 +22,7 @@ def read_env_properties(file_path: str) -> dict: return env_properties -@reporter.step_deco("Update data in environment.properties") +@reporter.step("Update data in environment.properties") def save_env_properties(file_path: str, env_data: dict) -> None: with open(file_path, "a+") as env_file: for env, env_value in env_data.items(): diff --git a/src/frostfs_testlib/utils/failover_utils.py b/src/frostfs_testlib/utils/failover_utils.py index 507168e..5c4d52f 100644 --- a/src/frostfs_testlib/utils/failover_utils.py +++ b/src/frostfs_testlib/utils/failover_utils.py @@ -3,7 +3,7 @@ from dataclasses import dataclass from time import sleep from typing import Optional -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.common import SERVICE_MAX_STARTUP_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import neo_go_dump_keys @@ -15,12 +15,10 @@ from frostfs_testlib.storage.dataclasses.node_base import ServiceClass from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils.datetime_utils import parse_time -reporter = get_reporter() - logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Check and return status of given service") +@reporter.step("Check and return status of given service") def service_status(service: str, shell: Shell) -> str: return shell.exec(f"sudo systemctl is-active {service}").stdout.rstrip() @@ -73,14 +71,14 @@ class TopCommand: ) -@reporter.step_deco("Run `top` command with specified PID") +@reporter.step("Run `top` command with specified PID") def service_status_top(service: str, shell: Shell) -> TopCommand: pid = service_pid(service, shell) output = shell.exec(f"sudo top -b -n 1 -p {pid}").stdout return TopCommand.from_stdout(output, pid) -@reporter.step_deco("Restart service n times with sleep") +@reporter.step("Restart service n times with sleep") def multiple_restart( service_type: type[NodeBase], node: ClusterNode, @@ -95,8 +93,7 @@ def multiple_restart( sleep(sleep_interval) -@reporter.step_deco("Get status of list of services and check expected status") -@wait_for_success(60, 5) +@wait_for_success(60, 5, title="Wait for services become {expected_status} on node {cluster_node}") def check_services_status(cluster_node: ClusterNode, service_list: list[ServiceClass], expected_status: str): cmd = "" for service in service_list: @@ -112,8 +109,7 @@ def 
check_services_status(cluster_node: ClusterNode, service_list: list[ServiceC ), f"Requested status={expected_status} not found in requested services={service_list}, list of statuses={result}" -@reporter.step_deco("Wait for active status of passed service") -@wait_for_success(60, 5) +@wait_for_success(60, 5, title="Wait for {service} to become active") def wait_service_in_desired_state(service: str, shell: Shell, expected_status: Optional[str] = "active"): real_status = service_status(service=service, shell=shell) assert ( @@ -121,8 +117,7 @@ def wait_service_in_desired_state(service: str, shell: Shell, expected_status: O ), f"Service {service}: expected status= {expected_status}, real status {real_status}" -@reporter.step_deco("Run healthcheck against passed service") -@wait_for_success(parse_time(SERVICE_MAX_STARTUP_TIME), 1) +@wait_for_success(parse_time(SERVICE_MAX_STARTUP_TIME), 1, title="Wait until {service_type} passes healthcheck on {node}") def service_type_healthcheck( service_type: type[NodeBase], node: ClusterNode, @@ -133,26 +128,25 @@ def service_type_healthcheck( ), f"Healthcheck failed for {service.get_service_systemctl_name()}, IP={node.host_ip}" -@reporter.step_deco("Kill by process name") +@reporter.step("Kill by process name") def kill_by_service_name(service_type: type[NodeBase], node: ClusterNode): service_systemctl_name = node.service(service_type).get_service_systemctl_name() pid = service_pid(service_systemctl_name, node.host.get_shell()) node.host.get_shell().exec(f"sudo kill -9 {pid}") -@reporter.step_deco("Service {service} suspend") +@reporter.step("Suspend {service}") def suspend_service(shell: Shell, service: str): shell.exec(f"sudo kill -STOP {service_pid(service, shell)}") -@reporter.step_deco("Service {service} resume") +@reporter.step("Resume {service}") def resume_service(shell: Shell, service: str): shell.exec(f"sudo kill -CONT {service_pid(service, shell)}") -@reporter.step_deco("Retrieve service's pid") # retry mechanism cause when the task has been started recently '0' PID could be returned -@wait_for_success(10, 1) +@wait_for_success(10, 1, title="Get {service} pid") def service_pid(service: str, shell: Shell) -> int: output = shell.exec(f"systemctl show --property MainPID {service}").stdout.rstrip() splitted = output.split("=") @@ -161,7 +155,7 @@ def service_pid(service: str, shell: Shell) -> int: return PID -@reporter.step_deco("Wrapper for neo-go dump keys command") +@reporter.step("Wrapper for neo-go dump keys command") def dump_keys(shell: Shell, node: ClusterNode) -> dict: host = node.host service_config = host.get_service_config(node.service(MorphChain).name) @@ -169,7 +163,7 @@ def dump_keys(shell: Shell, node: ClusterNode) -> dict: return neo_go_dump_keys(shell=shell, wallet=wallet) -@reporter.step_deco("Wait for object replication") +@reporter.step("Wait for object replication") def wait_object_replication( cid: str, oid: str, diff --git a/src/frostfs_testlib/utils/file_keeper.py b/src/frostfs_testlib/utils/file_keeper.py index ad6836b..a5670cc 100644 --- a/src/frostfs_testlib/utils/file_keeper.py +++ b/src/frostfs_testlib/utils/file_keeper.py @@ -1,17 +1,15 @@ from concurrent.futures import ThreadPoolExecutor -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.storage.dataclasses.node_base import NodeBase -reporter = get_reporter() - class FileKeeper: """This class is responsible to make backup copy of modified file and restore when required (mostly after the test)""" files_to_restore: 
dict[NodeBase, list[str]] = {} - @reporter.step_deco("Adding {file_to_restore} from node {node} to restore list") + @reporter.step("Adding {file_to_restore} from node {node} to restore list") def add(self, node: NodeBase, file_to_restore: str): if node in self.files_to_restore and file_to_restore in self.files_to_restore[node]: # Already added return @@ -26,7 +24,7 @@ class FileKeeper: shell = node.host.get_shell() shell.exec(f"cp {file_to_restore} {file_to_restore}.bak") - @reporter.step_deco("Restore files") + @reporter.step("Restore files") def restore_files(self): nodes = self.files_to_restore.keys() if not nodes: @@ -41,7 +39,7 @@ class FileKeeper: # Iterate through results for exception check if any pass - @reporter.step_deco("Restore files on node {node}") + @reporter.step("Restore files on node {node}") def _restore_files_on_node(self, node: NodeBase): shell = node.host.get_shell() for file_to_restore in self.files_to_restore[node]: diff --git a/src/frostfs_testlib/utils/file_utils.py b/src/frostfs_testlib/utils/file_utils.py index a41665e..d238106 100644 --- a/src/frostfs_testlib/utils/file_utils.py +++ b/src/frostfs_testlib/utils/file_utils.py @@ -4,10 +4,9 @@ import os import uuid from typing import Any, Optional -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.common import ASSETS_DIR -reporter = get_reporter() logger = logging.getLogger("NeoLogger") @@ -61,7 +60,7 @@ def generate_file_with_content( return file_path -@reporter.step_deco("Get File Hash") +@reporter.step("Get File Hash") def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str: """Generates hash for the specified file. @@ -88,7 +87,7 @@ def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[in return file_hash.hexdigest() -@reporter.step_deco("Concatenation set of files to one file") +@reporter.step("Concatenate set of files into one file") def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str: """Concatenates several files into a single file. 
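The patch above completes the testlib-wide migration from per-module `reporter = get_reporter()` globals to the `reporter` facade imported straight from `frostfs_testlib`. For reference, a minimal sketch of the two usage patterns the hunks standardize on, assuming only what the diffs themselves show: the decorator form, whose title placeholders are interpolated from the wrapped function's arguments, and the context-manager form for inline sub-steps. The `restart_storage` function and its body are hypothetical illustrations, not testlib API:

from frostfs_testlib import reporter

@reporter.step("Restart storage on {node}")  # "{node}" is filled in from the call arguments
def restart_storage(node: str) -> None:
    with reporter.step(f"Stop service on {node}"):  # inline sub-step, context-manager form
        ...  # hypothetical stop logic
    with reporter.step(f"Start service on {node}"):
        ...  # hypothetical start logic

restart_storage("node01")  # each step is recorded under its interpolated title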
From 17c1a4f14bfe14260ad8a9ad4027d1d0e589bc59 Mon Sep 17 00:00:00 2001 From: mkadilov Date: Fri, 1 Dec 2023 15:54:28 +0300 Subject: [PATCH 091/274] [#136] Added exclude_filter Signed-off-by: Mikhail Kadilov --- src/frostfs_testlib/hosting/docker_host.py | 5 +++++ src/frostfs_testlib/hosting/interfaces.py | 1 + 2 files changed, 6 insertions(+) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 0e4ea11..17146c0 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -235,6 +235,7 @@ class DockerHost(Host): since: Optional[datetime] = None, until: Optional[datetime] = None, unit: Optional[str] = None, + exclude_filter: Optional[str] = None, ) -> str: client = self._get_docker_client() filtered_logs = "" @@ -248,6 +249,10 @@ class DockerHost(Host): matches = re.findall(filter_regex, filtered_logs, re.IGNORECASE + re.MULTILINE) found = list(matches) + + if exclude_filter: + found = [match for match in found if match != exclude_filter] + if found: filtered_logs += f"{container_name}:\n{os.linesep.join(found)}" diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 84b7911..9dd6f3c 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -287,6 +287,7 @@ class Host(ABC): since: Optional[datetime] = None, until: Optional[datetime] = None, unit: Optional[str] = None, + exclude_filter: Optional[str] = None, ) -> str: """Get logs from host filtered by regex. From e65fc359fe3c3e1019b6fb98610cd612e93fc26f Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Fri, 1 Dec 2023 14:15:12 +0300 Subject: [PATCH 092/274] [#134] Add service uptime method Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/storage/dataclasses/node_base.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index ace0214..4fc7dea 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -1,8 +1,10 @@ from abc import abstractmethod from dataclasses import dataclass +from datetime import datetime, timedelta, timezone from typing import Optional, TypedDict, TypeVar import yaml +from dateutil import parser from frostfs_testlib import reporter from frostfs_testlib.hosting.config import ServiceConfig @@ -170,6 +172,15 @@ class NodeBase(HumanReadableABC): def _get_service_config(self) -> ServiceConfig: return self.host.get_service_config(self.name) + def get_service_uptime(self, service: str) -> timedelta: + result = self.host.get_shell().exec( + f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2" + ) + start_time = parser.parse(result.stdout.strip()) + current_time = datetime.now(tz=timezone.utc) + active_time = current_time - start_time + return active_time + ServiceClass = TypeVar("ServiceClass", bound=NodeBase) From 81dfc723dae06b4ef4a2d87a1f0805651e950966 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 4 Dec 2023 17:59:29 +0300 Subject: [PATCH 093/274] [#137] Ability to control remote process ids and reports for load Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/k6.py | 33 ++++--- src/frostfs_testlib/load/load_report.py | 59 ++++++------- .../processes/remote_process.py | 86 ++++++++++++++++--- .../controllers/background_load_controller.py | 34 +++++++-
.../controllers/cluster_state_controller.py | 10 ++- 5 files changed, 159 insertions(+), 63 deletions(-) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index 92da8e0..2ce7c75 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -34,7 +34,6 @@ class LoadResults: class K6: _k6_process: RemoteProcess - _start_time: datetime def __init__( self, @@ -61,6 +60,18 @@ class K6: self._k6_dir: str = k6_dir + command = ( + f"{self._k6_dir}/k6 run {self._generate_env_variables()} " + f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js" + ) + user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None + process_id = ( + self.load_params.load_id + if self.load_params.scenario != LoadScenario.VERIFY + else f"{self.load_params.load_id}_verify" + ) + self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, user, process_id) + @property def process_dir(self) -> str: return self._k6_process.process_dir @@ -111,15 +122,15 @@ class K6: reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables") return " ".join([f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None]) + def get_start_time(self) -> datetime: + return datetime.fromtimestamp(self._k6_process.start_time()) + + def get_end_time(self) -> datetime: + return datetime.fromtimestamp(self._k6_process.end_time()) + def start(self) -> None: with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"): - self._start_time = int(datetime.utcnow().timestamp()) - command = ( - f"{self._k6_dir}/k6 run {self._generate_env_variables()} " - f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js" - ) - user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None - self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, user) + self._k6_process.start() def wait_until_finished(self, soft_timeout: int = 0) -> None: with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"): @@ -128,8 +139,10 @@ class K6: else: timeout = self.load_params.load_time or 0 + start_time = int(self.get_start_time().timestamp()) + current_time = int(datetime.utcnow().timestamp()) - working_time = current_time - self._start_time + working_time = current_time - start_time remaining_time = timeout - working_time setup_teardown_time = ( @@ -146,7 +159,7 @@ class K6: original_timeout = timeout timeouts = { - "K6 start time": self._start_time, + "K6 start time": start_time, "Current time": current_time, "K6 working time": working_time, "Remaining time for load": remaining_time, diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index ad3a26d..105d852 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -17,11 +17,15 @@ class LoadReport: self.start_time: Optional[datetime] = None self.end_time: Optional[datetime] = None - def set_start_time(self): - self.start_time = datetime.utcnow() + def set_start_time(self, time: datetime = None): + if time is None: + time = datetime.utcnow() + self.start_time = time - def set_end_time(self): - self.end_time = datetime.utcnow() + def set_end_time(self, time: datetime = None): + if time is None: + time = datetime.utcnow() + self.end_time = time def add_summaries(self, load_summaries: dict): 
self.load_summaries_list.append(load_summaries) @@ -31,6 +35,7 @@ class LoadReport: def get_report_html(self): report_sections = [ + [self.load_params, self._get_load_id_section_html], [self.load_test, self._get_load_params_section_html], [self.load_summaries_list, self._get_totals_section_html], [self.end_time, self._get_test_time_html], @@ -44,9 +49,7 @@ class LoadReport: return html def _get_load_params_section_html(self) -> str: - params: str = yaml.safe_dump( - [self.load_test], sort_keys=False, indent=2, explicit_start=True - ) + params: str = yaml.safe_dump([self.load_test], sort_keys=False, indent=2, explicit_start=True) params = params.replace("\n", "
").replace(" ", " ") section_html = f"""

Scenario params

@@ -55,8 +58,17 @@ class LoadReport: return section_html + def _get_load_id_section_html(self) -> str: + section_html = f"""

Load ID: {self.load_params.load_id}

+
""" + + return section_html + def _get_test_time_html(self) -> str: - html = f"""

Scenario duration in UTC time (from agent)

+ if not self.start_time or not self.end_time: + return "" + + html = f"""

Scenario duration

{self.start_time} - {self.end_time}

""" @@ -97,7 +109,7 @@ class LoadReport: LoadScenario.gRPC_CAR: "open model", LoadScenario.S3_CAR: "open model", LoadScenario.LOCAL: "local fill", - LoadScenario.S3_LOCAL: "local fill" + LoadScenario.S3_LOCAL: "local fill", } return model_map[self.load_params.scenario] @@ -124,10 +136,7 @@ class LoadReport: total_errors: int = 0 for node_key, errors in errors.items(): total_errors += errors - if ( - self.load_params.k6_process_allocation_strategy - == K6ProcessAllocationStrategy.PER_ENDPOINT - ): + if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT: per_node_errors_html += self._row(f"At {node_key}", errors) latency_html = "" @@ -139,9 +148,7 @@ class LoadReport: for param_name, param_val in latency_dict.items(): latency_values += f"{param_name}={param_val:.2f}ms " - latency_html += self._row( - f"{operation_type} latency {node_key.split(':')[0]}", latency_values - ) + latency_html += self._row(f"{operation_type} latency {node_key.split(':')[0]}", latency_values) object_size, object_size_unit = calc_unit(self.load_params.object_size, 1) duration = self._seconds_to_formatted_duration(self.load_params.load_time) @@ -180,9 +187,7 @@ class LoadReport: write_latency = {} write_errors = {} requested_write_rate = self.load_params.write_rate - requested_write_rate_str = ( - f"{requested_write_rate}op/sec" if requested_write_rate else "" - ) + requested_write_rate_str = f"{requested_write_rate}op/sec" if requested_write_rate else "" read_operations = 0 read_op_sec = 0 @@ -197,20 +202,12 @@ class LoadReport: delete_latency = {} delete_errors = {} requested_delete_rate = self.load_params.delete_rate - requested_delete_rate_str = ( - f"{requested_delete_rate}op/sec" if requested_delete_rate else "" - ) + requested_delete_rate_str = f"{requested_delete_rate}op/sec" if requested_delete_rate else "" if self.load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]: - delete_vus = max( - self.load_params.preallocated_deleters or 0, self.load_params.max_deleters or 0 - ) - write_vus = max( - self.load_params.preallocated_writers or 0, self.load_params.max_writers or 0 - ) - read_vus = max( - self.load_params.preallocated_readers or 0, self.load_params.max_readers or 0 - ) + delete_vus = max(self.load_params.preallocated_deleters or 0, self.load_params.max_deleters or 0) + write_vus = max(self.load_params.preallocated_writers or 0, self.load_params.max_writers or 0) + read_vus = max(self.load_params.preallocated_readers or 0, self.load_params.max_readers or 0) else: write_vus = self.load_params.writers read_vus = self.load_params.readers diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py index 1252b97..5624940 100644 --- a/src/frostfs_testlib/processes/remote_process.py +++ b/src/frostfs_testlib/processes/remote_process.py @@ -15,21 +15,33 @@ from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions class RemoteProcess: - def __init__(self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector]): + def __init__( + self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector], proc_id: str + ): self.process_dir = process_dir self.cmd = cmd self.stdout_last_line_number = 0 self.stderr_last_line_number = 0 self.pid: Optional[str] = None self.proc_rc: Optional[int] = None + self.proc_start_time: Optional[int] = None + self.proc_end_time: Optional[int] = None self.saved_stdout: Optional[str] = None self.saved_stderr: 
Optional[str] = None self.shell = shell + self.proc_id: str = proc_id self.cmd_inspectors: list[CommandInspector] = [cmd_inspector] if cmd_inspector else [] @classmethod @reporter.step("Create remote process") - def create(cls, command: str, shell: Shell, working_dir: str = "/tmp", user: Optional[str] = None) -> RemoteProcess: + def create( + cls, + command: str, + shell: Shell, + working_dir: str = "/tmp", + user: Optional[str] = None, + proc_id: Optional[str] = None, + ) -> RemoteProcess: """ Create a process on a remote host. @@ -40,6 +52,7 @@ class RemoteProcess: stderr: contains script errors stdout: contains script output user: user on behalf whom command will be executed + proc_id: process string identificator Args: shell: Shell instance @@ -49,19 +62,31 @@ class RemoteProcess: Returns: RemoteProcess instance for further examination """ + if proc_id is None: + proc_id = f"{uuid.uuid4()}" + cmd_inspector = SuInspector(user) if user else None remote_process = cls( cmd=command, - process_dir=os.path.join(working_dir, f"proc_{uuid.uuid4()}"), + process_dir=os.path.join(working_dir, f"proc_{proc_id}"), shell=shell, cmd_inspector=cmd_inspector, + proc_id=proc_id, ) - remote_process._create_process_dir() - remote_process._generate_command_script(command) - remote_process._start_process() - remote_process.pid = remote_process._get_pid() + return remote_process + @reporter.step("Start remote process") + def start(self): + """ + Starts a process on a remote host. + """ + + self._create_process_dir() + self._generate_command_script() + self._start_process() + self.pid = self._get_pid() + @reporter.step("Get process stdout") def stdout(self, full: bool = False) -> str: """ @@ -130,17 +155,48 @@ class RemoteProcess: if self.proc_rc is not None: return self.proc_rc + result = self._cat_proc_file("rc") + if not result: + return None + + self.proc_rc = int(result) + return self.proc_rc + + @reporter.step("Get process start time") + def start_time(self) -> Optional[int]: + if self.proc_start_time is not None: + return self.proc_start_time + + result = self._cat_proc_file("start_time") + if not result: + return None + + self.proc_start_time = int(result) + return self.proc_start_time + + @reporter.step("Get process end time") + def end_time(self) -> Optional[int]: + if self.proc_end_time is not None: + return self.proc_end_time + + result = self._cat_proc_file("end_time") + if not result: + return None + + self.proc_end_time = int(result) + return self.proc_end_time + + def _cat_proc_file(self, file: str) -> Optional[str]: terminal = self.shell.exec( - f"cat {self.process_dir}/rc", + f"cat {self.process_dir}/{file}", CommandOptions(check=False, extra_inspectors=self.cmd_inspectors, no_log=True), ) if "No such file or directory" in terminal.stderr: return None elif terminal.stderr or terminal.return_code != 0: - raise AssertionError(f"cat process rc was not successful: {terminal.stderr}") + raise AssertionError(f"cat process {file} was not successful: {terminal.stderr}") - self.proc_rc = int(terminal.stdout) - return self.proc_rc + return terminal.stdout @reporter.step("Check if process is running") def running(self) -> bool: @@ -195,17 +251,19 @@ class RemoteProcess: return terminal.stdout.strip() @reporter.step("Generate command script") - def _generate_command_script(self, command: str) -> None: - command = command.replace('"', '\\"').replace("\\", "\\\\") + def _generate_command_script(self) -> None: + command = self.cmd.replace('"', '\\"').replace("\\", "\\\\") script = ( f"#!/bin/bash\n" f"cd 
{self.process_dir}\n" + f"date +%s > {self.process_dir}/start_time\n" f"{command} &\n" f"pid=\$!\n" f"cd {self.process_dir}\n" f"echo \$pid > {self.process_dir}/pid\n" f"wait \$pid\n" - f"echo $? > {self.process_dir}/rc" + f"echo $? > {self.process_dir}/rc\n" + f"date +%s > {self.process_dir}/end_time\n" ) self.shell.exec( diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index 003bb6b..5f2ed99 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -1,4 +1,5 @@ import copy +from datetime import datetime from typing import Optional import frostfs_testlib.resources.optionals as optionals @@ -10,6 +11,7 @@ from frostfs_testlib.load.load_verifiers import LoadVerifier from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.testing.parallel import parallel from frostfs_testlib.testing.test_control import run_optionally @@ -26,6 +28,7 @@ class BackgroundLoadController: endpoints: list[str] runner: ScenarioRunner started: bool + load_reporters: list[LoadReport] def __init__( self, @@ -45,6 +48,7 @@ class BackgroundLoadController: self.loaders_wallet = loaders_wallet self.runner = runner self.started = False + self.load_reporters = [] if load_params.endpoint_selection_strategy is None: raise RuntimeError("endpoint_selection_strategy should not be None") @@ -83,12 +87,20 @@ class BackgroundLoadController: return all_endpoints[load_type][endpoint_selection_strategy] + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step("Init k6 instances") + def init_k6(self): + self.endpoints = self._get_endpoints(self.load_params.load_type, self.load_params.endpoint_selection_strategy) + self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir) + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step("Prepare load instances") def prepare(self): - self.endpoints = self._get_endpoints(self.load_params.load_type, self.load_params.endpoint_selection_strategy) self.runner.prepare(self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir) - self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir) + self.init_k6() + + def append_reporter(self, load_report: LoadReport): + self.load_reporters.append(load_report) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) def start(self): @@ -128,16 +140,30 @@ class BackgroundLoadController: @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step("Stop and get results of load") - def teardown(self, load_report: Optional[LoadReport] = None): + def teardown(self): if not self.started: return self.stop() self.load_summaries = self._get_results() self.started = False - if load_report: + + start_time = min(self._get_start_times()) + end_time = max(self._get_end_times()) + + for load_report in self.load_reporters: + load_report.set_start_time(start_time) + load_report.set_end_time(end_time) load_report.add_summaries(self.load_summaries) + def _get_start_times(self) -> list[datetime]: + futures = parallel([k6.get_start_time for k6 in self.runner.get_k6_instances()]) + return [future.result() for future in futures] + + def _get_end_times(self) -> list[datetime]: + futures = 
parallel([k6.get_end_time for k6 in self.runner.get_k6_instances()]) + return [future.result() for future in futures] + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step("Run post-load verification") def verify(self): diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 35ab6c1..301b636 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -540,8 +540,9 @@ class ClusterStateController: options = CommandOptions(check=False) return self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code - @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.ONLINE) - @reporter.step("Waiting for {node} to go online") + @retry( + max_attempts=60, sleep_interval=10, expected_result=HostStatus.ONLINE, title="Waiting for {node} to go online" + ) def _wait_for_host_online(self, node: ClusterNode): try: ping_result = self._ping_host(node) @@ -552,8 +553,9 @@ class ClusterStateController: logger.warning(f"Host ping fails with error {err}") return HostStatus.OFFLINE - @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.OFFLINE) - @reporter.step("Waiting for {node} to go offline") + @retry( + max_attempts=60, sleep_interval=10, expected_result=HostStatus.OFFLINE, title="Waiting for {node} to go offline" + ) def _wait_for_host_offline(self, node: ClusterNode): try: ping_result = self._ping_host(node) From ae566b413b04c747b27c63a044368eb1c219db92 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 30 Nov 2023 13:50:57 +0300 Subject: [PATCH 094/274] [#139] Use readers for init time calculation Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_config.py | 78 +++++++++++-------------- tests/test_load_config.py | 32 +++++++--- 2 files changed, 57 insertions(+), 53 deletions(-) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 735d8ec..c1c98fe 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -40,11 +40,18 @@ all_load_scenarios = [ LoadScenario.gRPC_CAR, LoadScenario.LOCAL, LoadScenario.S3_MULTIPART, - LoadScenario.S3_LOCAL + LoadScenario.S3_LOCAL, ] all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY] -constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP, LoadScenario.LOCAL, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL] +constant_vus_scenarios = [ + LoadScenario.gRPC, + LoadScenario.S3, + LoadScenario.HTTP, + LoadScenario.LOCAL, + LoadScenario.S3_MULTIPART, + LoadScenario.S3_LOCAL, +] constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR] grpc_preset_scenarios = [ @@ -124,13 +131,9 @@ class Preset: # ------ GRPC ------ # Amount of containers which should be created - containers_count: Optional[int] = metadata_field( - grpc_preset_scenarios, "containers", None, False - ) + containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None, False) # Container placement policy for containers for gRPC - container_placement_policy: Optional[str] = metadata_field( - grpc_preset_scenarios, "policy", None, False - ) + container_placement_policy: Optional[str] = metadata_field(grpc_preset_scenarios, "policy", None, False) # ------ S3 ------ # Amount of buckets which should be created @@ -180,7 +183,14 @@ class LoadParams: awscli_url: 
Optional[str] = None # No ssl verification flag no_verify_ssl: Optional[bool] = metadata_field( - [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL, LoadScenario.VERIFY, LoadScenario.HTTP], + [ + LoadScenario.S3, + LoadScenario.S3_CAR, + LoadScenario.S3_MULTIPART, + LoadScenario.S3_LOCAL, + LoadScenario.VERIFY, + LoadScenario.HTTP, + ], "no-verify-ssl", "NO_VERIFY_SSL", False, @@ -198,9 +208,7 @@ class LoadParams: # Specifies the minimum duration of every single execution (i.e. iteration). # Any iterations that are shorter than this value will cause that VU to # sleep for the remainder of the time until the specified minimum duration is reached. - min_iteration_duration: Optional[str] = metadata_field( - all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False - ) + min_iteration_duration: Optional[str] = metadata_field(all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False) # Prepare/cut objects locally on client before sending prepare_locally: Optional[bool] = metadata_field( [LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False @@ -225,46 +233,34 @@ class LoadParams: # ------- CONSTANT ARRIVAL RATE SCENARIO PARAMS ------- # Number of iterations to start during each timeUnit period for write. - write_rate: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "WRITE_RATE", True, True - ) + write_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "WRITE_RATE", True, True) # Number of iterations to start during each timeUnit period for read. - read_rate: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "READ_RATE", True, True - ) + read_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "READ_RATE", True, True) # Number of iterations to start during each timeUnit period for delete. - delete_rate: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True - ) + delete_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True) # Amount of preAllocatedVUs for write operations. preallocated_writers: Optional[int] = metadata_field( constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True ) # Amount of maxVUs for write operations. - max_writers: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True - ) + max_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True) # Amount of preAllocatedVUs for read operations. preallocated_readers: Optional[int] = metadata_field( constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True ) # Amount of maxVUs for read operations. - max_readers: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "MAX_READERS", False, True - ) + max_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_READERS", False, True) # Amount of preAllocatedVUs for read operations. preallocated_deleters: Optional[int] = metadata_field( constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True ) # Amount of maxVUs for delete operations. 
- max_deleters: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True - ) + max_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True) # Multipart # Number of parts to upload in parallel @@ -272,20 +268,18 @@ class LoadParams: [LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True ) # part size must be greater than (5 MB) - write_object_part_size: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False) + write_object_part_size: Optional[int] = metadata_field( + [LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False + ) # Period of time to apply the rate value. - time_unit: Optional[str] = metadata_field( - constant_arrival_rate_scenarios, None, "TIME_UNIT", False - ) + time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT", False) # ------- VERIFY SCENARIO PARAMS ------- # Maximum verification time for k6 to verify objects. Default is BACKGROUND_LOAD_MAX_VERIFY_TIME (3600). verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT", False) # Amount of Verification VU. - verify_clients: Optional[int] = metadata_field( - [LoadScenario.VERIFY], None, "CLIENTS", True, False - ) + verify_clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS", True, False) # ------- LOCAL SCENARIO PARAMS ------- # Config file location (filled automatically) @@ -341,10 +335,8 @@ class LoadParams: return math.ceil(self._get_total_vus() * self.vu_init_time) def _get_total_vus(self) -> int: - vu_fields = ["writers", "preallocated_writers"] - data_fields = [ - getattr(self, field.name) or 0 for field in fields(self) if field.name in vu_fields - ] + vu_fields = ["writers", "preallocated_writers", "readers", "preallocated_readers"] + data_fields = [getattr(self, field.name) or 0 for field in fields(self) if field.name in vu_fields] return sum(data_fields) def _get_applicable_fields(self): @@ -375,9 +367,7 @@ class LoadParams: ] for field in data_fields: - actual_field_type = ( - get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type) - ) + actual_field_type = get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type) if is_dataclass(actual_field_type) and getattr(instance, field.name): fields_with_data += LoadParams._get_meta_fields(getattr(instance, field.name)) diff --git a/tests/test_load_config.py b/tests/test_load_config.py index 256a04b..926399b 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -12,6 +12,7 @@ from frostfs_testlib.load.load_config import ( ReadFrom, ) from frostfs_testlib.load.runners import DefaultRunner +from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode @@ -53,6 +54,25 @@ class TestLoadConfig: assert repr(load_params) == expected assert f"{load_params}" == expected + def test_load_params_init_time(self): + load_params = LoadParams(load_type=LoadType.S3) + vus = 100 + + load_params.vu_init_time = BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME + # Used in time calculations + load_params.readers = vus + load_params.writers = vus + load_params.preallocated_readers = vus + load_params.preallocated_writers = vus + + # Not used in 
time calculations + load_params.deleters = vus + load_params.preallocated_deleters = vus + + expected = vus * 4 * BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME + actual = load_params.get_init_time() + assert actual == expected, "Incorrect time for get_init_time()" + def test_load_params_initially_have_all_values_none(self): load_params = LoadParams(load_type=LoadType.S3) self._check_all_values_none(load_params, ["load_type", "scenario"]) @@ -285,9 +305,7 @@ class TestLoadConfig: self._check_preset_params(load_params, expected_preset_args) self._check_env_vars(load_params, expected_env_vars) - @pytest.mark.parametrize( - "load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True - ) + @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True) def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): expected_env_vars = { "CLIENTS": 14, @@ -299,9 +317,7 @@ class TestLoadConfig: self._check_env_vars(load_params, expected_env_vars) - @pytest.mark.parametrize( - "load_params, load_type", [(LoadScenario.VERIFY, LoadType.gRPC)], indirect=True - ) + @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.gRPC)], indirect=True) def test_argument_parsing_for_grpc_verify_scenario(self, load_params: LoadParams): expected_env_vars = { "CLIENTS": 14, @@ -339,9 +355,7 @@ class TestLoadConfig: self._check_preset_params(load_params, expected_preset_args) self._check_env_vars(load_params, expected_env_vars) - @pytest.mark.parametrize( - "load_params, set_empty", [(LoadScenario.gRPC_CAR, True)], indirect=True - ) + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.gRPC_CAR, True)], indirect=True) def test_empty_argument_parsing_for_grpc_car_scenario(self, load_params: LoadParams): expected_preset_args = [ "--size '0'", From 247d2fbab7d3cfb475da32d282651a351028eb4b Mon Sep 17 00:00:00 2001 From: anurindm Date: Tue, 21 Nov 2023 10:20:01 +0300 Subject: [PATCH 095/274] Added logger config path attribute to NodeBase class Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/storage/constants.py | 1 + src/frostfs_testlib/storage/dataclasses/node_base.py | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 9ad24eb..b1b7995 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -6,6 +6,7 @@ class ConfigAttributes: CONFIG_DIR = "service_config_dir" CONFIG_PATH = "config_path" SHARD_CONFIG_PATH = "shard_config_path" + LOGGER_CONFIG_PATH = "logger_config_path" LOCAL_WALLET_PATH = "local_wallet_path" LOCAL_WALLET_CONFIG = "local_config_path" ENDPOINT_DATA_0 = "endpoint_data0" diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 4fc7dea..bf36665 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -120,6 +120,14 @@ class NodeBase(HumanReadableABC): ConfigAttributes.WALLET_CONFIG, ) + def get_logger_config_path(self) -> str: + """ + Returns config path for logger located on remote host + """ + config_attributes = self.host.get_service_config(self.name) + return self._get_attribute( + ConfigAttributes.LOGGER_CONFIG_PATH) if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes else None + @property def config_dir(self) -> str: return self._get_attribute(ConfigAttributes.CONFIG_DIR) From 
54d26b226c4c2099d9f894281d5901f8579b1915 Mon Sep 17 00:00:00 2001 From: mkadilov Date: Mon, 11 Dec 2023 14:20:06 +0300 Subject: [PATCH 096/274] [#140] Executive command changed Added exception of error 'Too many requests' in log analyzer Signed-off-by: Mikhail Kadilov m.kadilov@yadro.com --- src/frostfs_testlib/hosting/docker_host.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 17146c0..05cd4b2 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -247,11 +247,10 @@ class DockerHost(Host): logger.info(f"Got exception while dumping logs of '{container_name}': {exc}") continue + if exclude_filter: + filtered_logs = filtered_logs.replace(exclude_filter, "") matches = re.findall(filter_regex, filtered_logs, re.IGNORECASE + re.MULTILINE) found = list(matches) - - if exclude_filter: - found = [match for match in found if match != exclude_filter] if found: filtered_logs += f"{container_name}:\n{os.linesep.join(found)}" From f1264bd47331837a30b8e56dc54612dcfbe3b534 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 12 Dec 2023 09:38:38 +0300 Subject: [PATCH 097/274] [#143] Change network utils Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/hosting/interfaces.py | 29 ++++++-- .../shell/command_inspectors.py | 2 +- src/frostfs_testlib/steps/network.py | 72 ++----------------- .../controllers/cluster_state_controller.py | 65 +++++------------ 4 files changed, 48 insertions(+), 120 deletions(-) diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 9dd6f3c..daea6eb 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -5,6 +5,7 @@ from typing import Optional from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig from frostfs_testlib.shell.interfaces import Shell from frostfs_testlib.testing.readable import HumanReadableEnum +from frostfs_testlib.testing.test_control import retry class HostStatus(HumanReadableEnum): @@ -25,9 +26,7 @@ class Host(ABC): def __init__(self, config: HostConfig) -> None: self._config = config - self._service_config_by_name = { - service_config.name: service_config for service_config in config.services - } + self._service_config_by_name = {service_config.name: service_config for service_config in config.services} self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis} @property @@ -323,9 +322,7 @@ class Host(ABC): """ @abstractmethod - def wait_for_service_to_be_in_state( - self, systemd_service_name: str, expected_state: str, timeout: int - ) -> None: + def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None: """ Waites for service to be in specified state. 
@@ -335,3 +332,23 @@ class Host(ABC): timeout: Seconds to wait """ + + def down_interface(self, interface: str) -> None: + shell = self.get_shell() + shell.exec(f"ip link set {interface} down") + + def up_interface(self, interface: str) -> None: + shell = self.get_shell() + shell.exec(f"ip link set {interface} up") + + def check_state(self, interface: str) -> str: + shell = self.get_shell() + return shell.exec(f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'").stdout.strip() + + @retry(max_attempts=5, sleep_interval=5, expected_result="UP") + def check_state_up(self, interface: str) -> str: + return self.check_state(interface=interface) + + @retry(max_attempts=5, sleep_interval=5, expected_result="DOWN") + def check_state_down(self, interface: str) -> str: + return self.check_state(interface=interface) diff --git a/src/frostfs_testlib/shell/command_inspectors.py b/src/frostfs_testlib/shell/command_inspectors.py index 8fe2f34..0003017 100644 --- a/src/frostfs_testlib/shell/command_inspectors.py +++ b/src/frostfs_testlib/shell/command_inspectors.py @@ -9,7 +9,7 @@ class SudoInspector(CommandInspector): def inspect(self, original_command: str, command: str) -> str: if not command.startswith("sudo"): - return f"sudo {command}" + return f"sudo -i {command}" return command diff --git a/src/frostfs_testlib/steps/network.py b/src/frostfs_testlib/steps/network.py index 64e235a..efaaf5a 100644 --- a/src/frostfs_testlib/steps/network.py +++ b/src/frostfs_testlib/steps/network.py @@ -1,77 +1,19 @@ -from frostfs_testlib import reporter +from frostfs_testlib.shell import CommandOptions from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.testing.test_control import retry -class IpTablesHelper: - @staticmethod - def drop_input_traffic_to_port(node: ClusterNode, ports: list[str]) -> None: - shell = node.host.get_shell() - for port in ports: - shell.exec(f"iptables -A INPUT -p tcp --dport {port} -j DROP") - +class IpHelper: @staticmethod def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None: shell = node.host.get_shell() for ip in block_ip: - shell.exec(f"iptables -A INPUT -s {ip} -j DROP") - - @staticmethod - def restore_input_traffic_to_port(node: ClusterNode) -> None: - shell = node.host.get_shell() - ports = shell.exec("iptables -L --numeric | grep DROP | awk '{print $7}'").stdout.strip().split("\n") - if ports[0] == "": - return - for port in ports: - shell.exec(f"iptables -D INPUT -p tcp --dport {port.split(':')[-1]} -j DROP") + shell.exec(f"ip route add blackhole {ip}") @staticmethod def restore_input_traffic_to_node(node: ClusterNode) -> None: shell = node.host.get_shell() - unlock_ip = shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'").stdout.strip().split("\n") - if unlock_ip[0] == "": + unlock_ip = shell.exec("ip route list | grep blackhole", CommandOptions(check=False)) + if unlock_ip.return_code != 0: return - for ip in unlock_ip: - shell.exec(f"iptables -D INPUT -s {ip} -j DROP") - - -# TODO Move class to HOST -class IfUpDownHelper: - @reporter.step("Down {interface} to {node}") - def down_interface(self, node: ClusterNode, interface: str) -> None: - shell = node.host.get_shell() - shell.exec(f"ifdown {interface}") - - @reporter.step("Up {interface} to {node}") - def up_interface(self, node: ClusterNode, interface: str) -> None: - shell = node.host.get_shell() - shell.exec(f"ifup {interface}") - - @reporter.step("Up all interface to {node}") - def up_all_interface(self, node: ClusterNode) -> None: - shell = 
node.host.get_shell() - interfaces = list(node.host.config.interfaces.keys()) - shell.exec("ifup -av") - for name_interface in interfaces: - self.check_state_up(node, name_interface) - - @reporter.step("Down all interface to {node}") - def down_all_interface(self, node: ClusterNode) -> None: - shell = node.host.get_shell() - interfaces = list(node.host.config.interfaces.keys()) - shell.exec("ifdown -av") - for name_interface in interfaces: - self.check_state_down(node, name_interface) - - @reporter.step("Check {node} to {interface}") - def check_state(self, node: ClusterNode, interface: str) -> str: - shell = node.host.get_shell() - return shell.exec(f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'").stdout.strip() - - @retry(max_attempts=5, sleep_interval=5, expected_result="UP") - def check_state_up(self, node: ClusterNode, interface: str) -> str: - return self.check_state(node=node, interface=interface) - - @retry(max_attempts=5, sleep_interval=5, expected_result="DOWN") - def check_state_down(self, node: ClusterNode, interface: str) -> str: - return self.check_state(node=node, interface=interface) + for ip in unlock_ip.stdout.strip().split("\n"): + shell.exec(f"ip route del blackhole {ip.split(' ')[1]}") diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 301b636..290503c 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -13,7 +13,7 @@ from frostfs_testlib.plugins import load_all from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, MORPH_BLOCK_TIME from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider -from frostfs_testlib.steps.network import IfUpDownHelper, IpTablesHelper +from frostfs_testlib.steps.network import IpHelper from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass @@ -22,7 +22,6 @@ from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for from frostfs_testlib.utils.datetime_utils import parse_time logger = logging.getLogger("NeoLogger") -if_up_down_helper = IfUpDownHelper() class StateManager: @@ -305,57 +304,25 @@ class ClusterStateController: [node.host.wait_success_resume_process(process_name) for node in list_nodes] self.suspended_services = {} - @reporter.step("Drop traffic to {node}, with ports - {ports}, nodes - {block_nodes}") + @reporter.step("Drop traffic to {node}, nodes - {block_nodes}") def drop_traffic( self, - mode: str, node: ClusterNode, wakeup_timeout: int, - ports: list[str] = None, + name_interface: str, block_nodes: list[ClusterNode] = None, ) -> None: - allowed_modes = ["ports", "nodes"] - assert mode in allowed_modes - - match mode: - case "ports": - IpTablesHelper.drop_input_traffic_to_port(node, ports) - case "nodes": - list_ip = self._parse_intefaces(block_nodes) - IpTablesHelper.drop_input_traffic_to_node(node, list_ip) + list_ip = self._parse_interfaces(block_nodes, name_interface) + IpHelper.drop_input_traffic_to_node(node, list_ip) time.sleep(wakeup_timeout) self.dropped_traffic.append(node) - @reporter.step("Ping traffic") - def ping_traffic( - self, - node: 
ClusterNode, - nodes_list: list[ClusterNode], - expect_result: int, - ) -> bool: - shell = node.host.get_shell() - options = CommandOptions(check=False) - ips = self._parse_intefaces(nodes_list) - for ip in ips: - code = shell.exec(f"ping {ip} -c 1", options).return_code - if code != expect_result: - return False - return True - @reporter.step("Start traffic to {node}") def restore_traffic( self, - mode: str, node: ClusterNode, ) -> None: - allowed_modes = ["ports", "nodes"] - assert mode in allowed_modes - - match mode: - case "ports": - IpTablesHelper.restore_input_traffic_to_port(node=node) - case "nodes": - IpTablesHelper.restore_input_traffic_to_node(node=node) + IpHelper.restore_input_traffic_to_node(node=node) @reporter.step("Restore blocked nodes") def restore_all_traffic(self): @@ -385,22 +352,25 @@ class ClusterStateController: @reporter.step("Down {interface} to {nodes}") def down_interface(self, nodes: list[ClusterNode], interface: str): for node in nodes: - if_up_down_helper.down_interface(node=node, interface=interface) - assert if_up_down_helper.check_state(node=node, interface=interface) == "DOWN" + node.host.down_interface(interface=interface) + assert node.host.check_state(interface=interface) == "DOWN" self.nodes_with_modified_interface.append(node) @reporter.step("Up {interface} to {nodes}") def up_interface(self, nodes: list[ClusterNode], interface: str): for node in nodes: - if_up_down_helper.up_interface(node=node, interface=interface) - assert if_up_down_helper.check_state(node=node, interface=interface) == "UP" + node.host.up_interface(interface=interface) + assert node.host.check_state(interface=interface) == "UP" if node in self.nodes_with_modified_interface: self.nodes_with_modified_interface.remove(node) @reporter.step("Restore interface") def restore_interfaces(self): for node in self.nodes_with_modified_interface: - if_up_down_helper.up_all_interface(node) + dict_interfaces = node.host.config.interfaces.keys() + for name_interface in dict_interfaces: + if "mgmt" not in name_interface: + node.host.up_interface(interface=name_interface) @reporter.step("Get node time") def get_node_date(self, node: ClusterNode) -> datetime: @@ -523,15 +493,14 @@ class ClusterStateController: return disk_controller def _restore_traffic_to_node(self, node): - IpTablesHelper.restore_input_traffic_to_port(node) - IpTablesHelper.restore_input_traffic_to_node(node) + IpHelper.restore_input_traffic_to_node(node) - def _parse_intefaces(self, nodes: list[ClusterNode]): + def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str): interfaces = [] for node in nodes: dict_interfaces = node.host.config.interfaces for type, ip in dict_interfaces.items(): - if "mgmt" not in type: + if name_interface in type: interfaces.append(ip) return interfaces From be964e731f8b0ff81873ea7d39684bebee2db371 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 13 Dec 2023 13:59:37 +0300 Subject: [PATCH 098/274] [#146] Prettify verifier messages for error rates Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_verifiers.py | 40 +++++++++++++--------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py index fe39862..5ca92dc 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -44,22 +44,20 @@ class LoadVerifier: if deleters and not delete_operations: issues.append(f"No any delete operation was performed") - if 
write_operations and writers and write_errors / write_operations * 100 > self.load_params.error_threshold: - issues.append( - f"Write error rate is greater than threshold: {write_errors / write_operations * 100} > {self.load_params.error_threshold}" - ) - if read_operations and readers and read_errors / read_operations * 100 > self.load_params.error_threshold: - issues.append( - f"Read error rate is greater than threshold: {read_errors / read_operations * 100} > {self.load_params.error_threshold}" - ) - if ( - delete_operations - and deleters - and delete_errors / delete_operations * 100 > self.load_params.error_threshold - ): - issues.append( - f"Delete error rate is greater than threshold: {delete_errors / delete_operations * 100} > {self.load_params.error_threshold}" - ) + error_rate = self._get_error_rate(writers, write_operations, write_errors) + if error_rate > self.load_params.error_threshold: + rate_str = self._get_rate_str(error_rate) + issues.append(f"Write errors exceeded threshold: {rate_str} > {self.load_params.error_threshold}%") + + error_rate = self._get_error_rate(readers, read_operations, read_errors) + if error_rate > self.load_params.error_threshold: + rate_str = self._get_rate_str(error_rate) + issues.append(f"Read errors exceeded threshold: {rate_str} > {self.load_params.error_threshold}%") + + error_rate = self._get_error_rate(deleters, delete_operations, delete_errors) + if error_rate > self.load_params.error_threshold: + rate_str = self._get_rate_str(error_rate) + issues.append(f"Delete errors exceeded threshold: {rate_str} > {self.load_params.error_threshold}%") return issues @@ -76,6 +74,16 @@ class LoadVerifier: ) return verify_issues + def _get_error_rate(self, vus: int, operations: int, errors: int) -> float: + if not operations or not vus: + return 0 + + error_rate = errors / operations * 100 + return error_rate + + def _get_rate_str(self, rate: float, minimal: float = 0.01) -> str: + return f"{rate:.2f}%" if rate >= minimal else f"~{minimal}%" + def _collect_verify_issues_on_process(self, label, load_summary, verification_summary) -> list[str]: issues = [] From 89522b607c650fd85d19bf0d720adf9ef0f6d052 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Mon, 11 Dec 2023 16:53:14 +0300 Subject: [PATCH 099/274] update percent of filling --- src/frostfs_testlib/load/k6.py | 29 ++++++++++++++++++++++++- src/frostfs_testlib/load/load_config.py | 2 ++ src/frostfs_testlib/load/runners.py | 4 +++- 3 files changed, 33 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index 2ce7c75..38167d2 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -72,6 +72,19 @@ class K6: ) self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, user, process_id) + def _get_fill_percents(self): + fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs").stdout.split("\n") + return [line.split() for line in fill_percents][:-1] + + def check_fill_percent(self): + fill_percents = self._get_fill_percents() + percent_mean = 0 + for line in fill_percents: + percent_mean += float(line[1].split('%')[0]) + percent_mean = percent_mean / len(fill_percents) + logger.info(f"{self.loader.ip} mean fill percent is {percent_mean}") + return percent_mean >= self.load_params.fill_percent + @property def process_dir(self) -> str: return self._k6_process.process_dir @@ -132,7 +145,7 @@ class K6: with reporter.step(f"Start load from loader {self.loader.ip} 
on endpoints {self.endpoints}"): self._k6_process.start() - def wait_until_finished(self, soft_timeout: int = 0) -> None: + def wait_until_finished(self, event, soft_timeout: int = 0) -> None: with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"): if self.load_params.scenario == LoadScenario.VERIFY: timeout = self.load_params.verify_time or 0 @@ -175,9 +188,23 @@ class K6: wait_interval = min_wait_interval if self._k6_process is None: assert "No k6 instances were executed" + while timeout > 0: + if not self.load_params.fill_percent is None: + with reporter.step(f"Check the percentage of filling of all data disks on the node"): + if self.check_fill_percent(): + logger.info(f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%") + event.set() + self.stop() + return + + if event.is_set(): + self.stop() + return + if not self._k6_process.running(): return + remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else "" remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else "" logger.info( diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index c1c98fe..df46521 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -195,6 +195,8 @@ class LoadParams: "NO_VERIFY_SSL", False, ) + # Percentage of filling of all data disks on all nodes + fill_percent: Optional[float] = None # ------- COMMON SCENARIO PARAMS ------- # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index f5284d8..dd6d50e 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -30,6 +30,7 @@ from frostfs_testlib.testing import parallel, run_optionally from frostfs_testlib.testing.test_control import retry from frostfs_testlib.utils import datetime_utils from frostfs_testlib.utils.file_keeper import FileKeeper +from threading import Event class RunnerBase(ScenarioRunner): @@ -41,7 +42,8 @@ class RunnerBase(ScenarioRunner): @reporter.step("Wait until load finish") def wait_until_finish(self, soft_timeout: int = 0): - parallel([k6.wait_until_finished for k6 in self.k6_instances], soft_timeout=soft_timeout) + event = Event() + parallel([k6.wait_until_finished for k6 in self.k6_instances], event=event, soft_timeout=soft_timeout) @property def is_running(self): From 02f3ef6b4077c01441b7b9d063c442aaa319cc6f Mon Sep 17 00:00:00 2001 From: "d.anurin" Date: Thu, 14 Dec 2023 12:53:51 +0300 Subject: [PATCH 100/274] [#147] Provide custom environment to ssh connection Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/hosting/config.py | 1 + src/frostfs_testlib/shell/command_inspectors.py | 2 +- src/frostfs_testlib/shell/ssh_shell.py | 7 +++++-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py index 88fe3e7..4ab66d7 100644 --- a/src/frostfs_testlib/hosting/config.py +++ b/src/frostfs_testlib/hosting/config.py @@ -67,6 +67,7 @@ class HostConfig: clis: list[CLIConfig] = field(default_factory=list) attributes: dict[str, str] = field(default_factory=dict) interfaces: dict[str, str] = field(default_factory=dict) + environment: dict[str, str] = field(default_factory=dict) def __post_init__(self) -> None: self.services = [ServiceConfig(**service) for service in self.services 
or []] diff --git a/src/frostfs_testlib/shell/command_inspectors.py b/src/frostfs_testlib/shell/command_inspectors.py index 0003017..8fe2f34 100644 --- a/src/frostfs_testlib/shell/command_inspectors.py +++ b/src/frostfs_testlib/shell/command_inspectors.py @@ -9,7 +9,7 @@ class SudoInspector(CommandInspector): def inspect(self, original_command: str, command: str) -> str: if not command.startswith("sudo"): - return f"sudo -i {command}" + return f"sudo {command}" return command diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index a7e6e1d..e718b4d 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -185,6 +185,7 @@ class SSHShell(Shell): private_key_passphrase: Optional[str] = None, port: str = "22", command_inspectors: Optional[list[CommandInspector]] = None, + custom_environment: Optional[dict] = None ) -> None: super().__init__() self.connection_provider = SshConnectionProvider() @@ -196,6 +197,8 @@ class SSHShell(Shell): self.command_inspectors = command_inspectors or [] + self.environment = custom_environment + @property def _connection(self): return self.connection_provider.provide(self.host, self.port) @@ -224,7 +227,7 @@ class SSHShell(Shell): @log_command def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: - stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, get_pty=True) + stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, get_pty=True, environment=self.environment) for interactive_input in options.interactive_inputs: input = interactive_input.input if not input.endswith("\n"): @@ -251,7 +254,7 @@ class SSHShell(Shell): @log_command def _exec_non_interactive(self, command: str, options: CommandOptions) -> CommandResult: try: - stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout) + stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, environment=self.environment) if options.close_stdin: stdin.close() From 3d63772f4a9dd0aec84220f77336da6157fc1666 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 13 Dec 2023 18:50:06 +0300 Subject: [PATCH 101/274] [#148] Add support for custom registry during read operations Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_config.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index df46521..3ea66b8 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -205,8 +205,12 @@ class LoadParams: object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False) # For read operations, controls from which set get objects to read read_from: Optional[ReadFrom] = None + # For read operations done from REGISTRY, controls delay which object should live before it will be used for read operation + read_age: Optional[int] = metadata_field(all_load_scenarios, None, "READ_AGE", False) # Output registry K6 file. Filled automatically. registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False) + # In case if we want to use custom registry file left from another load run + custom_registry: Optional[str] = None # Specifies the minimum duration of every single execution (i.e. iteration). 
# Any iterations that are shorter than this value will cause that VU to # sleep for the remainder of the time until the specified minimum duration is reached. @@ -294,6 +298,11 @@ class LoadParams: if self.read_from == ReadFrom.REGISTRY: self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt") + + # For now it's okay to have it this way + if self.custom_registry is not None: + self.registry_file = self.custom_registry + if self.read_from == ReadFrom.PRESET: self.registry_file = None From 8e739adea5d299e5dea87b5584429a738f079d85 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Fri, 15 Dec 2023 13:13:09 +0300 Subject: [PATCH 102/274] [#150] Increased the status waiting timeout Signed-off-by: Dmitriy Zayakin --- .../storage/controllers/cluster_state_controller.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 290503c..f51be78 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -476,12 +476,12 @@ class ClusterStateController: def _enable_date_synchronizer(self, cluster_node: ClusterNode): shell = cluster_node.host.get_shell() shell.exec("timedatectl set-ntp true") - cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 5) + cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 15) def _disable_date_synchronizer(self, cluster_node: ClusterNode): shell = cluster_node.host.get_shell() shell.exec("timedatectl set-ntp false") - cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 5) + cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 15) def _get_disk_controller(self, node: StorageNode, device: str, mountpoint: str) -> DiskController: disk_controller_id = DiskController.get_id(node, device) From 663c14470981bdd1445f182dd0d6dc00c2d83662 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Fri, 17 Nov 2023 16:36:24 +0300 Subject: [PATCH 103/274] Search container by name using HTTP requests --- src/frostfs_testlib/steps/cli/container.py | 14 ++++++++------ src/frostfs_testlib/steps/s3/s3_helper.py | 6 ++++-- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index be96138..b3afd88 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -1,6 +1,7 @@ import json import logging import re +import requests from dataclasses import dataclass from time import sleep from typing import Optional, Union @@ -344,12 +345,13 @@ def _parse_cid(output: str) -> str: @reporter.step("Search container by name") -def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str): - list_cids = list_containers(wallet, shell, endpoint) - for cid in list_cids: - cont_info = get_container(wallet, cid, shell, endpoint, True) - if cont_info.get("attributes", {}).get("Name", None) == name: - return cid +def search_container_by_name(name: str, node: ClusterNode): + node_shell = node.host.get_shell() + output = node_shell.exec(f"curl -I HEAD http://127.0.0.1:8084/{name}") + pattern = r"X-Container-Id: (\S+)" + cid = re.findall(pattern, output.stdout) + if cid: + return cid[0] return None diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py 
b/src/frostfs_testlib/steps/s3/s3_helper.py index 1d7adfa..68d5379 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -231,6 +231,8 @@ def search_nodes_with_bucket( shell: Shell, endpoint: str, ) -> list[ClusterNode]: - cid = search_container_by_name(wallet=wallet, name=bucket_name, shell=shell, endpoint=endpoint) - nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) + cid = search_container_by_name(name=bucket_name, cluster=cluster) + nodes_list = search_nodes_with_container( + wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster + ) return nodes_list From 10a6efa333cf931260fae399c266100ba6e3e9f1 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 20 Dec 2023 16:02:54 +0300 Subject: [PATCH 104/274] [#151] Refactor load report Signed-off-by: Andrey Berezin --- .../load/interfaces/summarized.py | 93 ++++++ src/frostfs_testlib/load/load_metrics.py | 268 ++++++++++-------- src/frostfs_testlib/load/load_report.py | 168 ++--------- src/frostfs_testlib/load/load_verifiers.py | 70 +---- 4 files changed, 278 insertions(+), 321 deletions(-) create mode 100644 src/frostfs_testlib/load/interfaces/summarized.py diff --git a/src/frostfs_testlib/load/interfaces/summarized.py b/src/frostfs_testlib/load/interfaces/summarized.py new file mode 100644 index 0000000..bca9822 --- /dev/null +++ b/src/frostfs_testlib/load/interfaces/summarized.py @@ -0,0 +1,93 @@ +from dataclasses import dataclass, field + +from frostfs_testlib.load.load_config import LoadParams, LoadScenario +from frostfs_testlib.load.load_metrics import get_metrics_object + + +@dataclass +class SummarizedErorrs: + total: int = field(default_factory=int) + percent: float = field(default_factory=float) + threshold: float = field(default_factory=float) + by_node: dict[str, int] = field(default_factory=dict) + + def calc_stats(self, operations): + self.total += sum(self.by_node.values()) + + if not operations: + return + + self.percent = self.total / operations * 100 + + +@dataclass +class SummarizedLatencies: + avg: float = field(default_factory=float) + min: float = field(default_factory=float) + max: float = field(default_factory=float) + by_node: dict[str, dict[str, int]] = field(default_factory=dict) + + def calc_stats(self): + if not self.by_node: + return + + avgs = [lt["avg"] for lt in self.by_node.values()] + self.avg = sum(avgs) / len(avgs) + + minimal = [lt["min"] for lt in self.by_node.values()] + self.min = min(minimal) + + maximum = [lt["max"] for lt in self.by_node.values()] + self.max = max(maximum) + + +@dataclass +class SummarizedStats: + threads: int = field(default_factory=int) + requested_rate: int = field(default_factory=int) + operations: int = field(default_factory=int) + rate: float = field(default_factory=float) + throughput: float = field(default_factory=float) + latencies: SummarizedLatencies = field(default_factory=SummarizedLatencies) + errors: SummarizedErorrs = field(default_factory=SummarizedErorrs) + passed: bool = True + + def calc_stats(self): + self.errors.calc_stats(self.operations) + self.latencies.calc_stats() + self.passed = self.errors.percent <= self.errors.threshold + + @staticmethod + def collect(load_params: LoadParams, load_summaries: dict) -> dict[str, "SummarizedStats"]: + if load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]: + delete_vus = max(load_params.preallocated_deleters or 0, load_params.max_deleters or 0) + write_vus = 
max(load_params.preallocated_writers or 0, load_params.max_writers or 0) + read_vus = max(load_params.preallocated_readers or 0, load_params.max_readers or 0) + else: + write_vus = load_params.writers + read_vus = load_params.readers + delete_vus = load_params.deleters + + summarized = { + "Write": SummarizedStats(threads=write_vus, requested_rate=load_params.write_rate), + "Read": SummarizedStats(threads=read_vus, requested_rate=load_params.read_rate), + "Delete": SummarizedStats(threads=delete_vus, requested_rate=load_params.delete_rate), + } + + for node_key, load_summary in load_summaries.items(): + metrics = get_metrics_object(load_params.scenario, load_summary) + for operation in metrics.operations: + target = summarized[operation._NAME] + if not operation.total_iterations: + continue + target.operations += operation.total_iterations + target.rate += operation.rate + target.latencies.by_node[node_key] = operation.latency + target.throughput += operation.throughput + if metrics.write.failed_iterations: + target.errors.by_node[node_key] = operation.failed_iterations + + for operation in summarized.values(): + operation.calc_stats() + + return summarized diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py index 3f175cf..5502b5c 100644 --- a/src/frostfs_testlib/load/load_metrics.py +++ b/src/frostfs_testlib/load/load_metrics.py @@ -1,95 +1,43 @@ from abc import ABC -from typing import Any +from typing import Any, Optional from frostfs_testlib.load.load_config import LoadScenario -class MetricsBase(ABC): - _WRITE_SUCCESS = "" - _WRITE_ERRORS = "" - _WRITE_THROUGHPUT = "data_sent" - _WRITE_LATENCY = "" - - _READ_SUCCESS = "" - _READ_ERRORS = "" - _READ_LATENCY = "" - _READ_THROUGHPUT = "data_received" - - _DELETE_SUCCESS = "" - _DELETE_LATENCY = "" - _DELETE_ERRORS = "" +class OperationMetric(ABC): + _NAME = "" + _SUCCESS = "" + _ERRORS = "" + _THROUGHPUT = "" + _LATENCY = "" def __init__(self, summary) -> None: self.summary = summary self.metrics = summary["metrics"] @property - def write_total_iterations(self) -> int: - return self._get_metric(self._WRITE_SUCCESS) + self._get_metric(self._WRITE_ERRORS) + def total_iterations(self) -> int: + return self._get_metric(self._SUCCESS) + self._get_metric(self._ERRORS) @property - def write_success_iterations(self) -> int: - return self._get_metric(self._WRITE_SUCCESS) + def success_iterations(self) -> int: + return self._get_metric(self._SUCCESS) @property - def write_latency(self) -> dict: - return self._get_metric(self._WRITE_LATENCY) + def latency(self) -> dict: + return self._get_metric(self._LATENCY) @property - def write_rate(self) -> float: - return self._get_metric_rate(self._WRITE_SUCCESS) + def rate(self) -> float: + return self._get_metric_rate(self._SUCCESS) @property - def write_failed_iterations(self) -> int: - return self._get_metric(self._WRITE_ERRORS) + def failed_iterations(self) -> int: + return self._get_metric(self._ERRORS) @property - def write_throughput(self) -> float: - return self._get_metric_rate(self._WRITE_THROUGHPUT) - - @property - def read_total_iterations(self) -> int: - return self._get_metric(self._READ_SUCCESS) + self._get_metric(self._READ_ERRORS) - - @property - def read_success_iterations(self) -> int: - return self._get_metric(self._READ_SUCCESS) - - @property - def read_latency(self) -> dict: - return self._get_metric(self._READ_LATENCY) - - @property - def read_rate(self) -> int: - return self._get_metric_rate(self._READ_SUCCESS) - - @property - def 
read_failed_iterations(self) -> int: - return self._get_metric(self._READ_ERRORS) - - @property - def read_throughput(self) -> float: - return self._get_metric_rate(self._READ_THROUGHPUT) - - @property - def delete_total_iterations(self) -> int: - return self._get_metric(self._DELETE_SUCCESS) + self._get_metric(self._DELETE_ERRORS) - - @property - def delete_success_iterations(self) -> int: - return self._get_metric(self._DELETE_SUCCESS) - - @property - def delete_latency(self) -> dict: - return self._get_metric(self._DELETE_LATENCY) - - @property - def delete_failed_iterations(self) -> int: - return self._get_metric(self._DELETE_ERRORS) - - @property - def delete_rate(self) -> int: - return self._get_metric_rate(self._DELETE_SUCCESS) + def throughput(self) -> float: + return self._get_metric_rate(self._THROUGHPUT) def _get_metric(self, metric: str) -> int: metrics_method_map = { @@ -104,9 +52,7 @@ class MetricsBase(ABC): metric = self.metrics[metric] metric_type = metric["type"] if metric_type not in metrics_method_map: - raise Exception( - f"Unsupported metric type: {metric_type}, supported: {metrics_method_map.keys()}" - ) + raise Exception(f"Unsupported metric type: {metric_type}, supported: {metrics_method_map.keys()}") return metrics_method_map[metric_type](metric) @@ -119,9 +65,7 @@ class MetricsBase(ABC): metric = self.metrics[metric] metric_type = metric["type"] if metric_type not in metrics_method_map: - raise Exception( - f"Unsupported rate metric type: {metric_type}, supported: {metrics_method_map.keys()}" - ) + raise Exception(f"Unsupported rate metric type: {metric_type}, supported: {metrics_method_map.keys()}") return metrics_method_map[metric_type](metric) @@ -138,63 +82,145 @@ class MetricsBase(ABC): return metric["values"] +class WriteOperationMetric(OperationMetric): + _NAME = "Write" + _SUCCESS = "" + _ERRORS = "" + _THROUGHPUT = "data_sent" + _LATENCY = "" + + +class ReadOperationMetric(OperationMetric): + _NAME = "Read" + _SUCCESS = "" + _ERRORS = "" + _THROUGHPUT = "data_received" + _LATENCY = "" + + +class DeleteOperationMetric(OperationMetric): + _NAME = "Delete" + _SUCCESS = "" + _ERRORS = "" + _THROUGHPUT = "" + _LATENCY = "" + + +class GrpcWriteOperationMetric(WriteOperationMetric): + _SUCCESS = "frostfs_obj_put_total" + _ERRORS = "frostfs_obj_put_fails" + _LATENCY = "frostfs_obj_put_duration" + + +class GrpcReadOperationMetric(ReadOperationMetric): + _SUCCESS = "frostfs_obj_get_total" + _ERRORS = "frostfs_obj_get_fails" + _LATENCY = "frostfs_obj_get_duration" + + +class GrpcDeleteOperationMetric(DeleteOperationMetric): + _SUCCESS = "frostfs_obj_delete_total" + _ERRORS = "frostfs_obj_delete_fails" + _LATENCY = "frostfs_obj_delete_duration" + + +class S3WriteOperationMetric(WriteOperationMetric): + _SUCCESS = "aws_obj_put_total" + _ERRORS = "aws_obj_put_fails" + _LATENCY = "aws_obj_put_duration" + + +class S3ReadOperationMetric(ReadOperationMetric): + _SUCCESS = "aws_obj_get_total" + _ERRORS = "aws_obj_get_fails" + _LATENCY = "aws_obj_get_duration" + + +class S3DeleteOperationMetric(DeleteOperationMetric): + _SUCCESS = "aws_obj_delete_total" + _ERRORS = "aws_obj_delete_fails" + _LATENCY = "aws_obj_delete_duration" + + +class S3LocalWriteOperationMetric(WriteOperationMetric): + _SUCCESS = "s3local_obj_put_total" + _ERRORS = "s3local_obj_put_fails" + _LATENCY = "s3local_obj_put_duration" + + +class S3LocalReadOperationMetric(ReadOperationMetric): + _SUCCESS = "s3local_obj_get_total" + _ERRORS = "s3local_obj_get_fails" + _LATENCY = "s3local_obj_get_duration" + + 
+class LocalWriteOperationMetric(WriteOperationMetric): + _SUCCESS = "local_obj_put_total" + _ERRORS = "local_obj_put_fails" + _LATENCY = "local_obj_put_duration" + + +class LocalReadOperationMetric(ReadOperationMetric): + _SUCCESS = "local_obj_get_total" + _ERRORS = "local_obj_get_fails" + + +class LocalDeleteOperationMetric(DeleteOperationMetric): + _SUCCESS = "local_obj_delete_total" + _ERRORS = "local_obj_delete_fails" + + +class VerifyReadOperationMetric(ReadOperationMetric): + _SUCCESS = "verified_obj" + _ERRORS = "invalid_obj" + + +class MetricsBase(ABC): + def __init__(self) -> None: + self.write: Optional[WriteOperationMetric] = None + self.read: Optional[ReadOperationMetric] = None + self.delete: Optional[DeleteOperationMetric] = None + + @property + def operations(self) -> list[OperationMetric]: + return [metric for metric in [self.write, self.read, self.delete] if metric is not None] + + class GrpcMetrics(MetricsBase): - _WRITE_SUCCESS = "frostfs_obj_put_total" - _WRITE_ERRORS = "frostfs_obj_put_fails" - _WRITE_LATENCY = "frostfs_obj_put_duration" - - _READ_SUCCESS = "frostfs_obj_get_total" - _READ_ERRORS = "frostfs_obj_get_fails" - _READ_LATENCY = "frostfs_obj_get_duration" - - _DELETE_SUCCESS = "frostfs_obj_delete_total" - _DELETE_ERRORS = "frostfs_obj_delete_fails" - _DELETE_LATENCY = "frostfs_obj_delete_duration" + def __init__(self, summary) -> None: + super().__init__() + self.write = GrpcWriteOperationMetric(summary) + self.read = GrpcReadOperationMetric(summary) + self.delete = GrpcDeleteOperationMetric(summary) class S3Metrics(MetricsBase): - _WRITE_SUCCESS = "aws_obj_put_total" - _WRITE_ERRORS = "aws_obj_put_fails" - _WRITE_LATENCY = "aws_obj_put_duration" + def __init__(self, summary) -> None: + super().__init__() + self.write = S3WriteOperationMetric(summary) + self.read = S3ReadOperationMetric(summary) + self.delete = S3DeleteOperationMetric(summary) - _READ_SUCCESS = "aws_obj_get_total" - _READ_ERRORS = "aws_obj_get_fails" - _READ_LATENCY = "aws_obj_get_duration" - - _DELETE_SUCCESS = "aws_obj_delete_total" - _DELETE_ERRORS = "aws_obj_delete_fails" - _DELETE_LATENCY = "aws_obj_delete_duration" class S3LocalMetrics(MetricsBase): - _WRITE_SUCCESS = "s3local_obj_put_total" - _WRITE_ERRORS = "s3local_obj_put_fails" - _WRITE_LATENCY = "s3local_obj_put_duration" + def __init__(self, summary) -> None: + super().__init__() + self.write = S3LocalWriteOperationMetric(summary) + self.read = S3LocalReadOperationMetric(summary) - _READ_SUCCESS = "s3local_obj_get_total" - _READ_ERRORS = "s3local_obj_get_fails" - _READ_LATENCY = "s3local_obj_get_duration" class LocalMetrics(MetricsBase): - _WRITE_SUCCESS = "local_obj_put_total" - _WRITE_ERRORS = "local_obj_put_fails" - _WRITE_LATENCY = "local_obj_put_duration" - - _READ_SUCCESS = "local_obj_get_total" - _READ_ERRORS = "local_obj_get_fails" - - _DELETE_SUCCESS = "local_obj_delete_total" - _DELETE_ERRORS = "local_obj_delete_fails" + def __init__(self, summary) -> None: + super().__init__() + self.write = LocalWriteOperationMetric(summary) + self.read = LocalReadOperationMetric(summary) + self.delete = LocalDeleteOperationMetric(summary) class VerifyMetrics(MetricsBase): - _WRITE_SUCCESS = "N/A" - _WRITE_ERRORS = "N/A" - - _READ_SUCCESS = "verified_obj" - _READ_ERRORS = "invalid_obj" - - _DELETE_SUCCESS = "N/A" - _DELETE_ERRORS = "N/A" + def __init__(self, summary) -> None: + super().__init__() + self.read = VerifyReadOperationMetric(summary) def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> MetricsBase: 
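A minimal usage sketch of the refactored per-operation metric classes above (editor's illustration, not part of the patch: the summary literal is invented, and the *_total/*_fails metric names match this revision of the code — a later patch in this series renames them to *_success):

    summary = {
        "metrics": {
            "frostfs_obj_put_total": {"type": "counter", "values": {"count": 100, "rate": 5.0}},
            "frostfs_obj_put_fails": {"type": "counter", "values": {"count": 2, "rate": 0.1}},
            "frostfs_obj_get_total": {"type": "counter", "values": {"count": 50, "rate": 2.5}},
            "frostfs_obj_get_fails": {"type": "counter", "values": {"count": 0, "rate": 0.0}},
            "frostfs_obj_delete_total": {"type": "counter", "values": {"count": 10, "rate": 0.5}},
            "frostfs_obj_delete_fails": {"type": "counter", "values": {"count": 0, "rate": 0.0}},
        }
    }
    metrics = GrpcMetrics(summary)
    assert metrics.write.total_iterations == 102   # put successes + put failures
    assert metrics.read.failed_iterations == 0
    for op in metrics.operations:                  # [write, read, delete]; None entries are skipped
        print(op._NAME, op.success_iterations, op.rate)

Each scenario class only wires up the operations it supports, which is what lets SummarizedStats.collect iterate `metrics.operations` without per-scenario branching.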
diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index 105d852..22ddb54 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -3,8 +3,8 @@ from typing import Optional import yaml +from frostfs_testlib.load.interfaces.summarized import SummarizedStats from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario -from frostfs_testlib.load.load_metrics import get_metrics_object from frostfs_testlib.utils.converting_utils import calc_unit @@ -114,63 +114,46 @@ class LoadReport: return model_map[self.load_params.scenario] - def _get_operations_sub_section_html( - self, - operation_type: str, - total_operations: int, - requested_rate_str: str, - vus_str: str, - total_rate: float, - throughput: float, - errors: dict[str, int], - latency: dict[str, dict], - ): + def _get_operations_sub_section_html(self, operation_type: str, stats: SummarizedStats): throughput_html = "" - if throughput > 0: - throughput, unit = calc_unit(throughput) + if stats.throughput > 0: + throughput, unit = calc_unit(stats.throughput) throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec") per_node_errors_html = "" - total_errors = 0 - if errors: - total_errors: int = 0 - for node_key, errors in errors.items(): - total_errors += errors - if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT: - per_node_errors_html += self._row(f"At {node_key}", errors) + for node_key, errors in stats.errors.by_node.items(): + if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT: + per_node_errors_html += self._row(f"At {node_key}", errors) latency_html = "" - if latency: - for node_key, latency_dict in latency.items(): - latency_values = "N/A" - if latency_dict: - latency_values = "" - for param_name, param_val in latency_dict.items(): - latency_values += f"{param_name}={param_val:.2f}ms " + for node_key, latencies in stats.latencies.by_node.items(): + latency_values = "N/A" + if latencies: + latency_values = "" + for param_name, param_val in latencies.items(): + latency_values += f"{param_name}={param_val:.2f}ms " - latency_html += self._row(f"{operation_type} latency {node_key.split(':')[0]}", latency_values) + latency_html += self._row(f"{operation_type} latency {node_key.split(':')[0]}", latency_values) object_size, object_size_unit = calc_unit(self.load_params.object_size, 1) duration = self._seconds_to_formatted_duration(self.load_params.load_time) model = self._get_model_string() + requested_rate_str = f"{stats.requested_rate}op/sec" if stats.requested_rate else "" # write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s - short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {vus_str} {model} - {throughput:.2f}{unit}/s {total_rate:.2f}/s" - errors_percent = 0 - if total_operations: - errors_percent = total_errors / total_operations * 100.0 + short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {stats.threads}th {model} - {throughput:.2f}{unit}/s {stats.rate:.2f}/s" html = f"""
{short_summary}
- {self._row("Total operations", total_operations)} - {self._row("OP/sec", f"{total_rate:.2f}")} + {self._row("Total operations", stats.operations)} + {self._row("OP/sec", f"{stats.rate:.2f}")} {throughput_html} {latency_html} {per_node_errors_html} - {self._row("Total", f"{total_errors} ({errors_percent:.2f}%)")} - {self._row("Threshold", f"{self.load_params.error_threshold:.2f}%")} + {self._row("Total", f"{stats.errors.total} ({stats.errors.percent:.2f}%)")} + {self._row("Threshold", f"{stats.errors.threshold:.2f}%")}
{short_summary}
Metrics
Errors
""" @@ -178,111 +161,12 @@ class LoadReport: def _get_totals_section_html(self): html = "" - for i, load_summaries in enumerate(self.load_summaries_list, 1): - html += f"
Load Results for load #{i}"
+        for i in range(len(self.load_summaries_list)):
+            html += f"Load Results for load #{i+1}
" - write_operations = 0 - write_op_sec = 0 - write_throughput = 0 - write_latency = {} - write_errors = {} - requested_write_rate = self.load_params.write_rate - requested_write_rate_str = f"{requested_write_rate}op/sec" if requested_write_rate else "" - - read_operations = 0 - read_op_sec = 0 - read_throughput = 0 - read_latency = {} - read_errors = {} - requested_read_rate = self.load_params.read_rate - requested_read_rate_str = f"{requested_read_rate}op/sec" if requested_read_rate else "" - - delete_operations = 0 - delete_op_sec = 0 - delete_latency = {} - delete_errors = {} - requested_delete_rate = self.load_params.delete_rate - requested_delete_rate_str = f"{requested_delete_rate}op/sec" if requested_delete_rate else "" - - if self.load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]: - delete_vus = max(self.load_params.preallocated_deleters or 0, self.load_params.max_deleters or 0) - write_vus = max(self.load_params.preallocated_writers or 0, self.load_params.max_writers or 0) - read_vus = max(self.load_params.preallocated_readers or 0, self.load_params.max_readers or 0) - else: - write_vus = self.load_params.writers - read_vus = self.load_params.readers - delete_vus = self.load_params.deleters - - write_vus_str = f"{write_vus}th" - read_vus_str = f"{read_vus}th" - delete_vus_str = f"{delete_vus}th" - - write_section_required = False - read_section_required = False - delete_section_required = False - - for node_key, load_summary in load_summaries.items(): - metrics = get_metrics_object(self.load_params.scenario, load_summary) - write_operations += metrics.write_total_iterations - if write_operations: - write_section_required = True - write_op_sec += metrics.write_rate - write_latency[node_key] = metrics.write_latency - write_throughput += metrics.write_throughput - if metrics.write_failed_iterations: - write_errors[node_key] = metrics.write_failed_iterations - - read_operations += metrics.read_total_iterations - if read_operations: - read_section_required = True - read_op_sec += metrics.read_rate - read_throughput += metrics.read_throughput - read_latency[node_key] = metrics.read_latency - if metrics.read_failed_iterations: - read_errors[node_key] = metrics.read_failed_iterations - - delete_operations += metrics.delete_total_iterations - if delete_operations: - delete_section_required = True - delete_op_sec += metrics.delete_rate - delete_latency[node_key] = metrics.delete_latency - if metrics.delete_failed_iterations: - delete_errors[node_key] = metrics.delete_failed_iterations - - if write_section_required: - html += self._get_operations_sub_section_html( - "Write", - write_operations, - requested_write_rate_str, - write_vus_str, - write_op_sec, - write_throughput, - write_errors, - write_latency, - ) - - if read_section_required: - html += self._get_operations_sub_section_html( - "Read", - read_operations, - requested_read_rate_str, - read_vus_str, - read_op_sec, - read_throughput, - read_errors, - read_latency, - ) - - if delete_section_required: - html += self._get_operations_sub_section_html( - "Delete", - delete_operations, - requested_delete_rate_str, - delete_vus_str, - delete_op_sec, - 0, - delete_errors, - delete_latency, - ) + summarized = SummarizedStats.collect(self.load_params, self.load_summaries_list[i]) + for operation_type, stats in summarized.items(): + if stats.operations: + html += self._get_operations_sub_section_html(operation_type, stats) return html diff --git a/src/frostfs_testlib/load/load_verifiers.py 
b/src/frostfs_testlib/load/load_verifiers.py index 5ca92dc..cbf6f64 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -1,4 +1,5 @@ from frostfs_testlib import reporter +from frostfs_testlib.load.interfaces.summarized import SummarizedStats from frostfs_testlib.load.load_config import LoadParams, LoadScenario from frostfs_testlib.load.load_metrics import get_metrics_object @@ -8,56 +9,16 @@ class LoadVerifier: self.load_params = load_params def collect_load_issues(self, load_summaries: dict[str, dict]) -> list[str]: - write_operations = 0 - write_errors = 0 - - read_operations = 0 - read_errors = 0 - - delete_operations = 0 - delete_errors = 0 - - writers = self.load_params.writers or self.load_params.preallocated_writers or 0 - readers = self.load_params.readers or self.load_params.preallocated_readers or 0 - deleters = self.load_params.deleters or self.load_params.preallocated_deleters or 0 - - for load_summary in load_summaries.values(): - metrics = get_metrics_object(self.load_params.scenario, load_summary) - - if writers: - write_operations += metrics.write_total_iterations - write_errors += metrics.write_failed_iterations - - if readers: - read_operations += metrics.read_total_iterations - read_errors += metrics.read_failed_iterations - - if deleters: - delete_operations += metrics.delete_total_iterations - delete_errors += metrics.delete_failed_iterations - + summarized = SummarizedStats.collect(self.load_params, load_summaries) issues = [] - if writers and not write_operations: - issues.append(f"No any write operation was performed") - if readers and not read_operations: - issues.append(f"No any read operation was performed") - if deleters and not delete_operations: - issues.append(f"No any delete operation was performed") - error_rate = self._get_error_rate(writers, write_operations, write_errors) - if error_rate > self.load_params.error_threshold: - rate_str = self._get_rate_str(error_rate) - issues.append(f"Write errors exceeded threshold: {rate_str} > {self.load_params.error_threshold}%") + for operation_type, stats in summarized.items(): + if stats.threads and not stats.operations: + issues.append(f"No any {operation_type.lower()} operation was performed") - error_rate = self._get_error_rate(readers, read_operations, read_errors) - if error_rate > self.load_params.error_threshold: - rate_str = self._get_rate_str(error_rate) - issues.append(f"Read errors exceeded threshold: {rate_str} > {self.load_params.error_threshold}%") - - error_rate = self._get_error_rate(deleters, delete_operations, delete_errors) - if error_rate > self.load_params.error_threshold: - rate_str = self._get_rate_str(error_rate) - issues.append(f"Delete errors exceeded threshold: {rate_str} > {self.load_params.error_threshold}%") + if stats.errors.percent > stats.errors.threshold: + rate_str = self._get_rate_str(stats.errors.percent) + issues.append(f"{operation_type} errors exceeded threshold: {rate_str} > {stats.errors.threshold}%") return issues @@ -74,13 +35,6 @@ class LoadVerifier: ) return verify_issues - def _get_error_rate(self, vus: int, operations: int, errors: int) -> float: - if not operations or not vus: - return 0 - - error_rate = errors / operations * 100 - return error_rate - def _get_rate_str(self, rate: float, minimal: float = 0.01) -> str: return f"{rate:.2f}%" if rate >= minimal else f"~{minimal}%" @@ -95,13 +49,13 @@ class LoadVerifier: delete_success = 0 if deleters > 0: - delete_success = load_metrics.delete_success_iterations + 
delete_success = load_metrics.delete.success_iterations if verification_summary: verify_metrics = get_metrics_object(LoadScenario.VERIFY, verification_summary) - verified_objects = verify_metrics.read_success_iterations - invalid_objects = verify_metrics.read_failed_iterations - total_left_objects = load_metrics.write_success_iterations - delete_success + verified_objects = verify_metrics.read.success_iterations + invalid_objects = verify_metrics.read.failed_iterations + total_left_objects = load_metrics.write.success_iterations - delete_success # Due to interruptions we may see total verified objects to be less than written on writers count if abs(total_left_objects - verified_objects) > writers: From 73c362c307bc4eda76acbd4dcdaf2b37112c4592 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 22 Dec 2023 11:33:41 +0300 Subject: [PATCH 105/274] [#153] Fix stat calculation and add error threshold Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/interfaces/summarized.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/load/interfaces/summarized.py b/src/frostfs_testlib/load/interfaces/summarized.py index bca9822..a005963 100644 --- a/src/frostfs_testlib/load/interfaces/summarized.py +++ b/src/frostfs_testlib/load/interfaces/summarized.py @@ -84,7 +84,8 @@ class SummarizedStats: target.rate += operation.rate target.latencies.by_node[node_key] = operation.latency target.throughput += operation.throughput - if metrics.write.failed_iterations: + target.errors.threshold = load_params.error_threshold + if operation.failed_iterations: target.errors.by_node[node_key] = operation.failed_iterations for operation in summarized.values(): From a4d1082ed558f7c95c2ebe3f8452720a848f681c Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Thu, 21 Dec 2023 10:47:43 +0300 Subject: [PATCH 106/274] Shards are attribute of StorageNode class --- src/frostfs_testlib/hosting/interfaces.py | 16 ++- .../storage/dataclasses/frostfs_services.py | 26 ++++- .../storage/dataclasses/shard.py | 99 +++++++++++++++++++ 3 files changed, 135 insertions(+), 6 deletions(-) create mode 100644 src/frostfs_testlib/storage/dataclasses/shard.py diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index daea6eb..3b2d718 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -219,12 +219,22 @@ class Host(ABC): """ @abstractmethod - def delete_pilorama(self, service_name: str) -> None: + def delete_file(self, file_path: str) -> None: """ - Deletes all pilorama.db files in the node. + Deletes file with provided file path Args: - service_name: Name of storage node service. 
+ file_path: full path to the file to delete + + """ + + @abstractmethod + def is_file_exist(self, file_path: str) -> bool: + """ + Checks if file exist + + Args: + file_path: full path to the file to check """ diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 6413ded..33e7894 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -3,7 +3,7 @@ import yaml from frostfs_testlib.blockchain import RPCClient from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.storage.dataclasses.node_base import NodeBase - +from frostfs_testlib.storage.dataclasses.shard import Shard class InnerRing(NodeBase): """ @@ -148,6 +148,20 @@ class StorageNode(NodeBase): def get_shards_config(self) -> tuple[str, dict]: return self.get_config(self.get_shard_config_path()) + def get_shards(self) -> list[Shard]: + config = self.get_shards_config()[1] + config["storage"]["shard"].pop("default") + return [Shard.from_object(shard) for shard in config["storage"]["shard"].values()] + + def get_shards_from_env(self) -> list[Shard]: + config = self.get_shards_config()[1] + configObj = ConfigObj(StringIO(config)) + + pattern = f"{SHARD_PREFIX}\d*" + num_shards = len(set(re.findall(pattern, self.get_shards_config()))) + + return [Shard.from_config_object(configObj, shard_id) for shard_id in range(num_shards)] + def get_control_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT) @@ -157,6 +171,9 @@ class StorageNode(NodeBase): def get_data_directory(self) -> str: return self.host.get_data_directory(self.name) + def get_storage_config(self) -> str: + return self.host.get_storage_config(self.name) + def get_http_hostname(self) -> str: return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME) @@ -169,8 +186,11 @@ class StorageNode(NodeBase): def delete_fstree(self): self.host.delete_fstree(self.name) - def delete_pilorama(self): - self.host.delete_pilorama(self.name) + def delete_file(self, file_path: str) -> None: + self.host.delete_file(file_path) + + def is_file_exist(self, file_path) -> bool: + return self.host.is_file_exist(file_path) def delete_metabase(self): self.host.delete_metabase(self.name) diff --git a/src/frostfs_testlib/storage/dataclasses/shard.py b/src/frostfs_testlib/storage/dataclasses/shard.py new file mode 100644 index 0000000..584138d --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/shard.py @@ -0,0 +1,99 @@ +import json +import pathlib +import re +from dataclasses import dataclass +from io import StringIO + +import allure +import pytest +import yaml +from configobj import ConfigObj +from frostfs_testlib.cli import FrostfsCli +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG + +SHARD_PREFIX = "FROSTFS_STORAGE_SHARD_" +BLOBSTOR_PREFIX = "_BLOBSTOR_" + + +@dataclass +class Blobstor: + path: str + path_type: str + + def __eq__(self, other) -> bool: + if not isinstance(other, self.__class__): + raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared") + return self.path == other.path and self.path_type == other.path_type + + def __hash__(self): + return hash((self.path, self.path_type)) + + @staticmethod + def from_config_object(section: ConfigObj, shard_id: str, blobstor_id: str): + var_prefix = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}{blobstor_id}" + return 
Blobstor(section.get(f"{var_prefix}_PATH"), section.get(f"{var_prefix}_TYPE")) + + +@dataclass +class Shard: + blobstor: list[Blobstor] + metabase: str + writecache: str + pilorama: str + + def __eq__(self, other) -> bool: + if not isinstance(other, self.__class__): + raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared") + return ( + set(self.blobstor) == set(other.blobstor) + and self.metabase == other.metabase + and self.writecache == other.writecache + and self.pilorama == other.pilorama + ) + + def __hash__(self): + return hash((self.metabase, self.writecache)) + + @staticmethod + def _get_blobstor_count_from_section(config_object: ConfigObj, shard_id: int): + pattern = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}" + blobstors = {key[: len(pattern) + 2] for key in config_object.keys() if pattern in key} + return len(blobstors) + + @staticmethod + def from_config_object(config_object: ConfigObj, shard_id: int): + var_prefix = f"{SHARD_PREFIX}{shard_id}" + + blobstor_count = Shard._get_blobstor_count_from_section(config_object, shard_id) + blobstors = [ + Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count) + ] + + write_cache_enabled = config_object.as_bool(f"{var_prefix}_WRITECACHE_ENABLED") + + return Shard( + blobstors, + config_object.get(f"{var_prefix}_METABASE_PATH"), + config_object.get(f"{var_prefix}_WRITECACHE_PATH") if write_cache_enabled else "", + ) + + @staticmethod + def from_object(shard): + metabase = shard["metabase"]["path"] if "path" in shard["metabase"] else shard["metabase"] + writecache = shard["writecache"]["path"] if "path" in shard["writecache"] else shard["writecache"] + + # Currently due to issue we need to check if pilorama exists in keys + # TODO: make pilorama mandatory after fix + if shard.get("pilorama"): + pilorama = shard["pilorama"]["path"] if "path" in shard["pilorama"] else shard["pilorama"] + else: + pilorama = None + + return Shard( + blobstor=[Blobstor(path=blobstor["path"], path_type=blobstor["type"]) for blobstor in shard["blobstor"]], + metabase=metabase, + writecache=writecache, + pilorama=pilorama + ) + From a3bda0b34f828ee16f577d1e3a59bf0dffe729b0 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 11 Jan 2024 13:42:02 +0300 Subject: [PATCH 107/274] [#154] Change func search container Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/s3/s3_helper.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index 68d5379..dbd3765 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -231,8 +231,10 @@ def search_nodes_with_bucket( shell: Shell, endpoint: str, ) -> list[ClusterNode]: - cid = search_container_by_name(name=bucket_name, cluster=cluster) - nodes_list = search_nodes_with_container( - wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster - ) + cid = None + for cluster_node in cluster.cluster_nodes: + cid = search_container_by_name(name=bucket_name, node=cluster_node) + if cid: + break + nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) return nodes_list From d6a2cf92a262e718afd81009c236b98b396682af Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 11 Jan 2024 14:51:07 +0300 Subject: [PATCH 108/274] [#155] Change args to optionally Signed-off-by: Dmitriy Zayakin --- 
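The diff below (patch 108) relaxes FrostfsCliShards.list so wallet credentials are optional; a sketch of the dispatch pattern it introduces, under assumed names (`cli` and its two execute methods are stand-ins for illustration, not the real wrapper API):

    from typing import Optional

    def list_shards(cli, endpoint: str, wallet: Optional[str] = None,
                    wallet_password: Optional[str] = None, **params):
        # Without a password, take the plain execution path; with one,
        # keep the password-injecting path used for protected wallets.
        args = {"endpoint": endpoint, "wallet": wallet, **params}
        if not wallet_password:
            return cli.execute("control shards list", **args)
        return cli.execute_with_password("control shards list", wallet_password, **args)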
src/frostfs_testlib/cli/frostfs_cli/shards.py | 27 +++++++------------ 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py index 6b47ac2..1727249 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -68,11 +68,7 @@ class FrostfsCliShards(CliCommand): return self._execute_with_password( "control shards set-mode", wallet_password, - **{ - param: value - for param, value in locals().items() - if param not in ["self", "wallet_password"] - }, + **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, ) def dump( @@ -105,18 +101,14 @@ class FrostfsCliShards(CliCommand): return self._execute_with_password( "control shards dump", wallet_password, - **{ - param: value - for param, value in locals().items() - if param not in ["self", "wallet_password"] - }, + **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, ) def list( self, endpoint: str, - wallet: str, - wallet_password: str, + wallet: Optional[str] = None, + wallet_password: Optional[str] = None, address: Optional[str] = None, json_mode: bool = False, timeout: Optional[str] = None, @@ -135,12 +127,13 @@ class FrostfsCliShards(CliCommand): Returns: Command's result. """ + if not wallet_password: + return self._execute( + "control shards list", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) return self._execute_with_password( "control shards list", wallet_password, - **{ - param: value - for param, value in locals().items() - if param not in ["self", "wallet_password"] - }, + **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, ) From df8d99d83cf4efd3ceb8b5c629e1f6083e92f153 Mon Sep 17 00:00:00 2001 From: Liza Date: Fri, 29 Dec 2023 01:25:13 +0300 Subject: [PATCH 109/274] [#156] load_time in the format of days, hours and minutes; new params Signed-off-by: Liza --- src/frostfs_testlib/load/load_config.py | 44 +++++++++++- tests/test_load_config.py | 89 +++++++++++++++++++++++++ 2 files changed, 131 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 3ea66b8..6f355fc 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -3,11 +3,28 @@ import os from dataclasses import dataclass, field, fields, is_dataclass from enum import Enum from types import MappingProxyType -from typing import Any, Optional, get_args +from typing import Any, Callable, Optional, get_args from frostfs_testlib.utils.converting_utils import calc_unit +def convert_time_to_seconds(time: int | str | None) -> int: + if time is None: + return None + if str(time).isdigit(): + seconds = int(time) + else: + days, hours, minutes = 0, 0, 0 + if "d" in time: + days, time = time.split("d") + if "h" in time: + hours, time = time.split("h") + if "min" in time: + minutes = time.replace("min", "") + seconds = int(days) * 86400 + int(hours) * 3600 + int(minutes) * 60 + return seconds + + class LoadType(Enum): gRPC = "grpc" S3 = "s3" @@ -76,6 +93,7 @@ def metadata_field( scenario_variable: Optional[str] = None, string_repr: Optional[bool] = True, distributed: Optional[bool] = False, + formatter: Optional[Callable] = None, ): return field( default=None, @@ -85,6 +103,7 @@ def metadata_field( "env_variable": scenario_variable, "string_repr": 
string_repr, "distributed": distributed, + "formatter": formatter, }, ) @@ -200,7 +219,9 @@ class LoadParams: # ------- COMMON SCENARIO PARAMS ------- # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. - load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False) + load_time: Optional[int] = metadata_field( + all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds + ) # Object size in KB for load and preset. object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False) # For read operations, controls from which set get objects to read @@ -384,6 +405,25 @@ class LoadParams: return fields_with_data or [] + def _get_field_formatter(self, field_name: str) -> Callable | None: + data_fields = fields(self) + formatters = [ + field.metadata["formatter"] + for field in data_fields + if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None + ] + if formatters: + return formatters[0] + + return None + + def __setattr__(self, field_name, value): + formatter = self._get_field_formatter(field_name) + if formatter: + value = formatter(value) + + super().__setattr__(field_name, value) + def __str__(self) -> str: load_type_str = self.scenario.value if self.scenario else self.load_type.value # TODO: migrate load_params defaults to testlib diff --git a/tests/test_load_config.py b/tests/test_load_config.py index 926399b..f4fa022 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -141,6 +141,8 @@ class TestLoadConfig: "--workers '7'", "--containers '16'", "--policy 'container_placement_policy'", + "--ignore-errors", + "--sleep '19'", ] expected_env_vars = { "DURATION": 9, @@ -151,6 +153,7 @@ class TestLoadConfig: "WRITERS": 7, "READERS": 7, "DELETERS": 8, + "READ_AGE": 8, "PREGEN_JSON": "pregen_json", "PREPARE_LOCALLY": True, } @@ -167,6 +170,8 @@ class TestLoadConfig: "--workers '7'", "--containers '16'", "--policy 'container_placement_policy'", + "--ignore-errors", + "--sleep '19'", ] expected_env_vars = { "DURATION": 9, @@ -184,6 +189,7 @@ class TestLoadConfig: "TIME_UNIT": "time_unit", "WRITE_RATE": 10, "READ_RATE": 9, + "READ_AGE": 8, "DELETE_RATE": 11, "PREPARE_LOCALLY": True, } @@ -201,6 +207,8 @@ class TestLoadConfig: "--workers '7'", "--buckets '13'", "--location 's3_location'", + "--ignore-errors", + "--sleep '19'", ] expected_env_vars = { "DURATION": 9, @@ -211,6 +219,7 @@ class TestLoadConfig: "WRITERS": 7, "READERS": 7, "DELETERS": 8, + "READ_AGE": 8, "NO_VERIFY_SSL": True, "PREGEN_JSON": "pregen_json", } @@ -218,6 +227,44 @@ class TestLoadConfig: self._check_preset_params(load_params, expected_preset_args) self._check_env_vars(load_params, expected_env_vars) + @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) + def test_argument_parsing_for_s3_car_scenario_with_stringed_time(self, load_params: LoadParams): + load_params.load_time = "2d3h5min" + expected_preset_args = [ + "--size '11'", + "--preload_obj '13'", + "--no-verify-ssl", + "--out 'pregen_json'", + "--workers '7'", + "--buckets '13'", + "--location 's3_location'", + "--ignore-errors", + "--sleep '19'", + ] + expected_env_vars = { + "DURATION": 183900, + "WRITE_OBJ_SIZE": 11, + "REGISTRY_FILE": "registry_file", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "NO_VERIFY_SSL": True, + "MAX_WRITERS": 11, + "MAX_READERS": 11, + "MAX_DELETERS": 12, + 
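+            # DURATION is 183900 because "2d3h5min" converts to
+            # 2*86400 + 3*3600 + 5*60 = 183900 seconds (see convert_time_to_seconds above).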
"PRE_ALLOC_DELETERS": 21, + "PRE_ALLOC_READERS": 20, + "PRE_ALLOC_WRITERS": 20, + "PREGEN_JSON": "pregen_json", + "TIME_UNIT": "time_unit", + "WRITE_RATE": 10, + "READ_RATE": 9, + "READ_AGE": 8, + "DELETE_RATE": 11, + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) def test_argument_parsing_for_s3_car_scenario(self, load_params: LoadParams): expected_preset_args = [ @@ -228,6 +275,8 @@ class TestLoadConfig: "--workers '7'", "--buckets '13'", "--location 's3_location'", + "--ignore-errors", + "--sleep '19'", ] expected_env_vars = { "DURATION": 9, @@ -246,6 +295,7 @@ class TestLoadConfig: "TIME_UNIT": "time_unit", "WRITE_RATE": 10, "READ_RATE": 9, + "READ_AGE": 8, "DELETE_RATE": 11, } @@ -262,6 +312,8 @@ class TestLoadConfig: "--workers '7'", "--containers '16'", "--policy 'container_placement_policy'", + "--ignore-errors", + "--sleep '19'", ] expected_env_vars = { "DURATION": 9, @@ -273,6 +325,7 @@ class TestLoadConfig: "WRITERS": 7, "READERS": 7, "DELETERS": 8, + "READ_AGE": 8, "PREGEN_JSON": "pregen_json", } @@ -288,6 +341,8 @@ class TestLoadConfig: "--workers '7'", "--containers '16'", "--policy 'container_placement_policy'", + "--ignore-errors", + "--sleep '19'", ] expected_env_vars = { "CONFIG_FILE": "config_file", @@ -299,6 +354,7 @@ class TestLoadConfig: "WRITERS": 7, "READERS": 7, "DELETERS": 8, + "READ_AGE": 8, "PREGEN_JSON": "pregen_json", } @@ -338,6 +394,7 @@ class TestLoadConfig: "--workers '0'", "--containers '0'", "--policy ''", + "--sleep '0'", ] expected_env_vars = { "DURATION": 0, @@ -348,6 +405,7 @@ class TestLoadConfig: "WRITERS": 0, "READERS": 0, "DELETERS": 0, + "READ_AGE": 0, "PREGEN_JSON": "", "PREPARE_LOCALLY": False, } @@ -364,6 +422,7 @@ class TestLoadConfig: "--workers '0'", "--containers '0'", "--policy ''", + "--sleep '0'", ] expected_env_vars = { "DURATION": 0, @@ -382,6 +441,7 @@ class TestLoadConfig: "WRITE_RATE": 0, "READ_RATE": 0, "DELETE_RATE": 0, + "READ_AGE": 0, "PREPARE_LOCALLY": False, } @@ -397,6 +457,7 @@ class TestLoadConfig: "--workers '0'", "--buckets '0'", "--location ''", + "--sleep '0'", ] expected_env_vars = { "DURATION": 0, @@ -407,6 +468,7 @@ class TestLoadConfig: "WRITERS": 0, "READERS": 0, "DELETERS": 0, + "READ_AGE": 0, "NO_VERIFY_SSL": False, "PREGEN_JSON": "", } @@ -423,6 +485,7 @@ class TestLoadConfig: "--workers '0'", "--buckets '0'", "--location ''", + "--sleep '0'", ] expected_env_vars = { "DURATION": 0, @@ -442,6 +505,7 @@ class TestLoadConfig: "WRITE_RATE": 0, "READ_RATE": 0, "DELETE_RATE": 0, + "READ_AGE": 0, } self._check_preset_params(load_params, expected_preset_args) @@ -456,6 +520,7 @@ class TestLoadConfig: "--workers '0'", "--containers '0'", "--policy ''", + "--sleep '0'", ] expected_env_vars = { "DURATION": 0, @@ -467,6 +532,7 @@ class TestLoadConfig: "WRITERS": 0, "READERS": 0, "DELETERS": 0, + "READ_AGE": 0, "PREGEN_JSON": "", } @@ -482,6 +548,7 @@ class TestLoadConfig: "--workers '0'", "--containers '0'", "--policy ''", + "--sleep '0'", ] expected_env_vars = { "CONFIG_FILE": "", @@ -493,6 +560,7 @@ class TestLoadConfig: "WRITERS": 0, "READERS": 0, "DELETERS": 0, + "READ_AGE": 0, "PREGEN_JSON": "", } @@ -531,6 +599,27 @@ class TestLoadConfig: self._check_env_vars(load_params, expected_env_vars) + @pytest.mark.parametrize( + "load_params, load_type", + [(LoadScenario.gRPC, LoadType.gRPC)], + indirect=True, + ) + @pytest.mark.parametrize( + "load_time, 
expected_seconds", + [ + (300, 300), + ("2d3h45min", 186300), + ("1d6h", 108000), + ("1d", 86400), + ("1d1min", 86460), + ("2h", 7200), + ("2h2min", 7320), + ], + ) + def test_convert_time_to_seconds(self, load_params: LoadParams, load_time: str | int, expected_seconds: int): + load_params.load_time = load_time + assert load_params.load_time == expected_seconds + def _check_preset_params(self, load_params: LoadParams, expected_preset_args: list[str]): preset_parameters = load_params.get_preset_arguments() assert sorted(preset_parameters) == sorted(expected_preset_args) From be36a10f1e9e56a55843b8b653a39b7479a8eb38 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 12 Jan 2024 18:23:04 +0300 Subject: [PATCH 110/274] [#157] fix for dev-env and unit-tests Signed-off-by: Andrey Berezin --- src/frostfs_testlib/hosting/docker_host.py | 14 ++++++++------ src/frostfs_testlib/steps/epoch.py | 2 +- tests/test_dataclasses.py | 16 +++++----------- tests/test_hosting.py | 14 +++++--------- 4 files changed, 19 insertions(+), 27 deletions(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 05cd4b2..3c9883a 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -152,9 +152,7 @@ class DockerHost(Host): timeout=service_attributes.start_timeout, ) - def wait_for_service_to_be_in_state( - self, systemd_service_name: str, expected_state: str, timeout: int - ) -> None: + def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None: raise NotImplementedError("Not implemented for docker") def get_data_directory(self, service_name: str) -> str: @@ -181,6 +179,12 @@ class DockerHost(Host): def delete_pilorama(self, service_name: str) -> None: raise NotImplementedError("Not implemented for docker") + def delete_file(self, file_path: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def is_file_exist(self, file_path: str) -> None: + raise NotImplementedError("Not implemented for docker") + def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: volume_path = self.get_data_directory(service_name) @@ -305,9 +309,7 @@ class DockerHost(Host): return container return None - def _wait_for_container_to_be_in_state( - self, container_name: str, expected_state: str, timeout: int - ) -> None: + def _wait_for_container_to_be_in_state(self, container_name: str, expected_state: str, timeout: int) -> None: iterations = 10 iteration_wait_time = timeout / iterations diff --git a/src/frostfs_testlib/steps/epoch.py b/src/frostfs_testlib/steps/epoch.py index 5a43ba3..ef8f85a 100644 --- a/src/frostfs_testlib/steps/epoch.py +++ b/src/frostfs_testlib/steps/epoch.py @@ -87,7 +87,7 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] remote_shell = alive_node.host.get_shell() - if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH: + if "force_transactions" not in alive_node.host.config.attributes: # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests) frostfs_adm = FrostfsAdm( shell=remote_shell, diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py index f1cc51e..19f3832 100644 --- a/tests/test_dataclasses.py +++ b/tests/test_dataclasses.py @@ -4,13 +4,7 @@ import pytest from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper from 
frostfs_testlib.storage.dataclasses.acl import EACLRole -from frostfs_testlib.storage.dataclasses.frostfs_services import ( - HTTPGate, - InnerRing, - MorphChain, - S3Gate, - StorageNode, -) +from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.object_size import ObjectSize @@ -22,10 +16,10 @@ class TestDataclassesStr: [ (Boto3ClientWrapper, "Boto3 client"), (AwsCliClient, "AWS CLI"), - (ObjectSize("simple", 1), "simple object size"), - (ObjectSize("simple", 10), "simple object size"), - (ObjectSize("complex", 5000), "complex object size"), - (ObjectSize("complex", 5555), "complex object size"), + (ObjectSize("simple", 1), "simple"), + (ObjectSize("simple", 10), "simple"), + (ObjectSize("complex", 5000), "complex"), + (ObjectSize("complex", 5555), "complex"), (StorageNode, "StorageNode"), (MorphChain, "MorphChain"), (S3Gate, "S3Gate"), diff --git a/tests/test_hosting.py b/tests/test_hosting.py index 14be8c5..39580cb 100644 --- a/tests/test_hosting.py +++ b/tests/test_hosting.py @@ -15,6 +15,7 @@ class TestHosting(TestCase): HOST1 = { "address": HOST1_ADDRESS, "plugin_name": HOST1_PLUGIN, + "healthcheck_plugin_name": "basic", "attributes": HOST1_ATTRIBUTES, "clis": HOST1_CLIS, "services": HOST1_SERVICES, @@ -32,6 +33,7 @@ class TestHosting(TestCase): HOST2 = { "address": HOST2_ADDRESS, "plugin_name": HOST2_PLUGIN, + "healthcheck_plugin_name": "basic", "attributes": HOST2_ATTRIBUTES, "clis": HOST2_CLIS, "services": HOST2_SERVICES, @@ -52,18 +54,14 @@ class TestHosting(TestCase): self.assertEqual(host1.config.plugin_name, self.HOST1_PLUGIN) self.assertDictEqual(host1.config.attributes, self.HOST1_ATTRIBUTES) self.assertListEqual(host1.config.clis, [CLIConfig(**cli) for cli in self.HOST1_CLIS]) - self.assertListEqual( - host1.config.services, [ServiceConfig(**service) for service in self.HOST1_SERVICES] - ) + self.assertListEqual(host1.config.services, [ServiceConfig(**service) for service in self.HOST1_SERVICES]) host2 = hosting.get_host_by_address(self.HOST2_ADDRESS) self.assertEqual(host2.config.address, self.HOST2_ADDRESS) self.assertEqual(host2.config.plugin_name, self.HOST2_PLUGIN) self.assertDictEqual(host2.config.attributes, self.HOST2_ATTRIBUTES) self.assertListEqual(host2.config.clis, [CLIConfig(**cli) for cli in self.HOST2_CLIS]) - self.assertListEqual( - host2.config.services, [ServiceConfig(**service) for service in self.HOST2_SERVICES] - ) + self.assertListEqual(host2.config.services, [ServiceConfig(**service) for service in self.HOST2_SERVICES]) def test_get_host_by_service(self): hosting = Hosting() @@ -104,9 +102,7 @@ class TestHosting(TestCase): services = hosting.find_service_configs(rf"^{self.SERVICE_NAME_PREFIX}") self.assertEqual(len(services), 2) for service in services: - self.assertEqual( - service.name[: len(self.SERVICE_NAME_PREFIX)], self.SERVICE_NAME_PREFIX - ) + self.assertEqual(service.name[: len(self.SERVICE_NAME_PREFIX)], self.SERVICE_NAME_PREFIX) service1 = hosting.find_service_configs(self.SERVICE1["name"]) self.assertEqual(len(service1), 1) From 40fa2c24cc159909d3b9fec391f56dffc4edc7da Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Fri, 12 Jan 2024 20:25:39 +0300 Subject: [PATCH 111/274] rename local_config_path --- src/frostfs_testlib/storage/constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index b1b7995..710262a 100644 --- 
a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -8,7 +8,7 @@ class ConfigAttributes: SHARD_CONFIG_PATH = "shard_config_path" LOGGER_CONFIG_PATH = "logger_config_path" LOCAL_WALLET_PATH = "local_wallet_path" - LOCAL_WALLET_CONFIG = "local_config_path" + LOCAL_WALLET_CONFIG = "local_wallet_config_path" ENDPOINT_DATA_0 = "endpoint_data0" ENDPOINT_DATA_1 = "endpoint_data1" ENDPOINT_INTERNAL = "endpoint_internal0" From c0a25ab699088a79c535aeded3b4150e922be5dd Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Thu, 18 Jan 2024 10:41:36 +0300 Subject: [PATCH 112/274] Support of custom version parameter instead of --version for all bins --- src/frostfs_testlib/utils/version_utils.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index 42bde6d..75ce8a5 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -40,16 +40,20 @@ def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: for service_config in host.config.services: exec_path = service_config.attributes.get("exec_path") requires_check = service_config.attributes.get("requires_version_check", "true") + version_parameter = service_config.attributes.get("custom_version_parameter", "--version") if exec_path: binary_path_by_name[service_config.name] = { "exec_path": exec_path, "check": requires_check.lower() == "true", + "version_parameter": version_parameter, } for cli_config in host.config.clis: requires_check = cli_config.attributes.get("requires_version_check", "true") + version_parameter = service_config.attributes.get("custom_version_parameter", "--version") binary_path_by_name[cli_config.name] = { "exec_path": cli_config.exec_path, "check": requires_check.lower() == "true", + "version_parameter": version_parameter, } shell = host.get_shell() @@ -57,7 +61,7 @@ def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: for binary_name, binary in binary_path_by_name.items(): try: binary_path = binary["exec_path"] - result = shell.exec(f"{binary_path} --version") + result = shell.exec(f"{binary_path} {binary['version_parameter']}") versions_at_host[binary_name] = {"version": _parse_version(result.stdout), "check": binary["check"]} except Exception as exc: logger.error(f"Cannot get version for {binary_path} because of\n{exc}") From 328e43fe674d16e90c162d1bd6cfb2fd463de012 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 22 Jan 2024 14:14:10 +0300 Subject: [PATCH 113/274] [#162] Refactor frostfs-cli functional Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/frostfs_cli/container.py | 8 +------- src/frostfs_testlib/cli/frostfs_cli/object.py | 6 ++---- src/frostfs_testlib/defaults.py | 2 +- src/frostfs_testlib/resources/cli.py | 2 +- src/frostfs_testlib/steps/cli/container.py | 7 +++---- src/frostfs_testlib/steps/cli/object.py | 13 +++++++------ src/frostfs_testlib/storage/constants.py | 1 + .../storage/dataclasses/node_base.py | 15 +++++++++++++-- 8 files changed, 29 insertions(+), 25 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py index 5ea8ba8..374c880 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/container.py +++ b/src/frostfs_testlib/cli/frostfs_cli/container.py @@ -65,7 +65,6 @@ class FrostfsCliContainer(CliCommand): ttl: Optional[int] = None, xhdr: Optional[dict] = None, force: bool = False, - timeout: 
Optional[str] = None, ) -> CommandResult: """ Delete an existing container. @@ -81,7 +80,6 @@ class FrostfsCliContainer(CliCommand): ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). Returns: Command's result. @@ -298,9 +296,5 @@ class FrostfsCliContainer(CliCommand): return self._execute( f"container nodes {from_str}", - **{ - param: value - for param, value in locals().items() - if param not in ["self", "from_file", "from_str"] - }, + **{param: value for param, value in locals().items() if param not in ["self", "from_file", "from_str"]}, ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 476af68..0e4654b 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -124,9 +124,7 @@ class FrostfsCliObject(CliCommand): """ return self._execute( "object hash", - **{ - param: value for param, value in locals().items() if param not in ["self", "params"] - }, + **{param: value for param, value in locals().items() if param not in ["self", "params"]}, ) def head( @@ -355,8 +353,8 @@ class FrostfsCliObject(CliCommand): def nodes( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, generate_key: Optional = None, diff --git a/src/frostfs_testlib/defaults.py b/src/frostfs_testlib/defaults.py index 687fbd6..22097be 100644 --- a/src/frostfs_testlib/defaults.py +++ b/src/frostfs_testlib/defaults.py @@ -1,5 +1,5 @@ class Options: - DEFAULT_SHELL_TIMEOUT = 90 + DEFAULT_SHELL_TIMEOUT = 120 @staticmethod def get_default_shell_timeout(): diff --git a/src/frostfs_testlib/resources/cli.py b/src/frostfs_testlib/resources/cli.py index 5f7d468..06a9832 100644 --- a/src/frostfs_testlib/resources/cli.py +++ b/src/frostfs_testlib/resources/cli.py @@ -9,4 +9,4 @@ FROSTFS_ADM_EXEC = os.getenv("FROSTFS_ADM_EXEC", "frostfs-adm") # Config for frostfs-adm utility. Optional if tests are running against devenv FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH") -CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", None) +CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", "100s") diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index b3afd88..3cc3f35 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -1,11 +1,12 @@ import json import logging import re -import requests from dataclasses import dataclass from time import sleep from typing import Optional, Union +import requests + from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC @@ -291,18 +292,17 @@ def delete_container( force: bool = False, session_token: Optional[str] = None, await_mode: bool = False, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> None: """ A wrapper for `frostfs-cli container delete` call. Args: + await_mode: Block execution until container is removed. 
wallet (str): path to a wallet on whose behalf we delete the container cid (str): ID of the container to delete shell: executor for cli command endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key force (bool): do not check whether container contains locks and remove immediately session_token: a path to session token file - timeout: Timeout for the operation. This function doesn't return anything. """ @@ -314,7 +314,6 @@ def delete_container( force=force, session=session_token, await_mode=await_mode, - timeout=timeout, ) diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 803524a..610b76a 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -732,23 +732,24 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict: @reporter.step("Search object nodes") def get_object_nodes( cluster: Cluster, - wallet: str, cid: str, oid: str, - shell: Shell, - endpoint: str, + alive_node: ClusterNode, bearer: str = "", xhdr: Optional[dict] = None, is_direct: bool = False, verify_presence_all: bool = False, - wallet_config: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> list[ClusterNode]: - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + shell = alive_node.host.get_shell() + endpoint = alive_node.storage_node.get_rpc_endpoint() + wallet = alive_node.storage_node.get_remote_wallet_path() + wallet_config = alive_node.storage_node.get_remote_wallet_config_path() + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config) result_object_nodes = cli.object.nodes( rpc_endpoint=endpoint, - wallet=wallet, cid=cid, oid=oid, bearer=bearer, diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 710262a..5b9d694 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -9,6 +9,7 @@ class ConfigAttributes: LOGGER_CONFIG_PATH = "logger_config_path" LOCAL_WALLET_PATH = "local_wallet_path" LOCAL_WALLET_CONFIG = "local_wallet_config_path" + REMOTE_WALLET_CONFIG = "remote_wallet_config_path" ENDPOINT_DATA_0 = "endpoint_data0" ENDPOINT_DATA_1 = "endpoint_data1" ENDPOINT_INTERNAL = "endpoint_internal0" diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index bf36665..72b12a9 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -114,6 +114,14 @@ class NodeBase(HumanReadableABC): ConfigAttributes.CONFIG_PATH, ) + def get_remote_wallet_config_path(self) -> str: + """ + Returns node config file path located on remote host + """ + return self._get_attribute( + ConfigAttributes.REMOTE_WALLET_CONFIG, + ) + def get_wallet_config_path(self) -> str: return self._get_attribute( ConfigAttributes.LOCAL_WALLET_CONFIG, @@ -125,8 +133,11 @@ class NodeBase(HumanReadableABC): Returns config path for logger located on remote host """ config_attributes = self.host.get_service_config(self.name) - return self._get_attribute( - ConfigAttributes.LOGGER_CONFIG_PATH) if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes else None + return ( + self._get_attribute(ConfigAttributes.LOGGER_CONFIG_PATH) + if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes + else None + ) @property def config_dir(self) -> str: From e04fac0770be267b0c2cf094d164c239d7a3be07 Mon Sep 17 00:00:00 2001 From: Andrey Berezin 
Date: Mon, 22 Jan 2024 19:06:38 +0300
Subject: [PATCH 114/274] [#164] Add local flag to preset in load

Signed-off-by: Andrey Berezin
---
 src/frostfs_testlib/load/load_config.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py
index 6f355fc..f072a4e 100644
--- a/src/frostfs_testlib/load/load_config.py
+++ b/src/frostfs_testlib/load/load_config.py
@@ -166,6 +166,9 @@ class Preset:
     # Flag to control preset errors
     ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False)
 
+    # Flag to ensure created containers store data on local endpoints
+    local: Optional[bool] = metadata_field(grpc_preset_scenarios, "local", None, False)
+
 
 @dataclass
 class LoadParams:

From 82f9df088a78b284299ce9deb6bec317d48a51ca Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Wed, 24 Jan 2024 15:23:26 +0300
Subject: [PATCH 115/274] [#167] Strip components for new xk6 archive and
 update unit tests

Signed-off-by: Andrey Berezin
---
 src/frostfs_testlib/load/runners.py | 4 ++--
 tests/test_load_config.py           | 4 ++++
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py
index dd6d50e..532c590 100644
--- a/src/frostfs_testlib/load/runners.py
+++ b/src/frostfs_testlib/load/runners.py
@@ -4,6 +4,7 @@ import math
 import re
 import time
 from dataclasses import fields
+from threading import Event
 from typing import Optional
 from urllib.parse import urlparse
 
@@ -30,7 +31,6 @@ from frostfs_testlib.testing import parallel, run_optionally
 from frostfs_testlib.testing.test_control import retry
 from frostfs_testlib.utils import datetime_utils
 from frostfs_testlib.utils.file_keeper import FileKeeper
-from threading import Event
 
 
 class RunnerBase(ScenarioRunner):
@@ -314,7 +314,7 @@ class LocalRunner(RunnerBase):
         with reporter.step("Download K6"):
             shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}")
             shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}")
-            shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz -C {k6_dir}")
+            shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz --strip-components 2 -C {k6_dir}")
             shell.exec(f"sudo chmod -R 777 {k6_dir}")
 
         with reporter.step("Create empty_passwd"):
diff --git a/tests/test_load_config.py b/tests/test_load_config.py
index f4fa022..834d051 100644
--- a/tests/test_load_config.py
+++ b/tests/test_load_config.py
@@ -143,6 +143,7 @@ class TestLoadConfig:
             "--policy 'container_placement_policy'",
             "--ignore-errors",
             "--sleep '19'",
+            "--local",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -172,6 +173,7 @@ class TestLoadConfig:
             "--policy 'container_placement_policy'",
             "--ignore-errors",
             "--sleep '19'",
+            "--local",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -304,6 +306,7 @@ class TestLoadConfig:
     @pytest.mark.parametrize("load_params", [LoadScenario.HTTP], indirect=True)
     def test_argument_parsing_for_http_scenario(self, load_params: LoadParams):
+        load_params.preset.local = False
         expected_preset_args = [
             "--no-verify-ssl",
             "--size '11'",
@@ -334,6 +337,7 @@ class TestLoadConfig:
     @pytest.mark.parametrize("load_params", [LoadScenario.LOCAL], indirect=True)
     def test_argument_parsing_for_local_scenario(self, load_params: LoadParams):
+        load_params.preset.local = False
         expected_preset_args = [
             "--size '11'",
             "--preload_obj '13'",

From 0d7a15877c17551f4f2a3dc00ce06ac46a8d6769 Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Fri, 26 Jan 2024 15:29:02 +0300
Subject: [PATCH 116/274] [#169] Update metrics

Signed-off-by: Andrey Berezin
---
 src/frostfs_testlib/load/load_metrics.py | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py
index 5502b5c..2dad3f6 100644
--- a/src/frostfs_testlib/load/load_metrics.py
+++ b/src/frostfs_testlib/load/load_metrics.py
@@ -107,66 +107,66 @@ class DeleteOperationMetric(OperationMetric):
 
 
 class GrpcWriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "frostfs_obj_put_total"
+    _SUCCESS = "frostfs_obj_put_success"
     _ERRORS = "frostfs_obj_put_fails"
     _LATENCY = "frostfs_obj_put_duration"
 
 
 class GrpcReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "frostfs_obj_get_total"
+    _SUCCESS = "frostfs_obj_get_success"
     _ERRORS = "frostfs_obj_get_fails"
     _LATENCY = "frostfs_obj_get_duration"
 
 
 class GrpcDeleteOperationMetric(DeleteOperationMetric):
-    _SUCCESS = "frostfs_obj_delete_total"
+    _SUCCESS = "frostfs_obj_delete_success"
     _ERRORS = "frostfs_obj_delete_fails"
     _LATENCY = "frostfs_obj_delete_duration"
 
 
 class S3WriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "aws_obj_put_total"
+    _SUCCESS = "aws_obj_put_success"
     _ERRORS = "aws_obj_put_fails"
     _LATENCY = "aws_obj_put_duration"
 
 
 class S3ReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "aws_obj_get_total"
+    _SUCCESS = "aws_obj_get_success"
     _ERRORS = "aws_obj_get_fails"
     _LATENCY = "aws_obj_get_duration"
 
 
 class S3DeleteOperationMetric(DeleteOperationMetric):
-    _SUCCESS = "aws_obj_delete_total"
+    _SUCCESS = "aws_obj_delete_success"
     _ERRORS = "aws_obj_delete_fails"
     _LATENCY = "aws_obj_delete_duration"
 
 
 class S3LocalWriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "s3local_obj_put_total"
+    _SUCCESS = "s3local_obj_put_success"
     _ERRORS = "s3local_obj_put_fails"
     _LATENCY = "s3local_obj_put_duration"
 
 
 class S3LocalReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "s3local_obj_get_total"
+    _SUCCESS = "s3local_obj_get_success"
     _ERRORS = "s3local_obj_get_fails"
 
 
 class LocalWriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "local_obj_put_total"
+    _SUCCESS = "local_obj_put_success"
     _ERRORS = "local_obj_put_fails"
     _LATENCY = "local_obj_put_duration"
 
 
 class LocalReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "local_obj_get_total"
+    _SUCCESS = "local_obj_get_success"
     _ERRORS = "local_obj_get_fails"
 
 
 class LocalDeleteOperationMetric(DeleteOperationMetric):
-    _SUCCESS = "local_obj_delete_total"
+    _SUCCESS = "local_obj_delete_success"
     _ERRORS = "local_obj_delete_fails"

From 6caa77dedfc0f4ecedc440c0e734bd5b695f5787 Mon Sep 17 00:00:00 2001
From: Ekaterina Chernitsyna
Date: Wed, 31 Jan 2024 16:42:30 +0300
Subject: [PATCH 117/274] [#172] parallel get remote binaries versions

---
 src/frostfs_testlib/utils/version_utils.py | 100 +++++++++++++--------
 1 file changed, 61 insertions(+), 39 deletions(-)

diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py
index 75ce8a5..2c1f4ab 100644
--- a/src/frostfs_testlib/utils/version_utils.py
+++ b/src/frostfs_testlib/utils/version_utils.py
@@ -2,10 +2,11 @@ import logging
 import re
 
 from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
-from frostfs_testlib.hosting import Hosting
+from frostfs_testlib.hosting import Host, Hosting
 from frostfs_testlib.resources.cli import FROSTFS_ADM_EXEC, FROSTFS_AUTHMATE_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
 from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
 from frostfs_testlib.shell import Shell
+from frostfs_testlib.testing.parallel import parallel
 
 logger = logging.getLogger("NeoLogger")
 
@@ -33,53 +34,74 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]:
     return versions
 
 
+def parallel_binary_versions(host: Host) -> dict[str, str]:
+    versions_by_host = {}
+
+    binary_path_by_name = {}  # Maps binary name to executable path
+    for service_config in host.config.services:
+        exec_path = service_config.attributes.get("exec_path")
+        requires_check = service_config.attributes.get("requires_version_check", "true")
+        if exec_path:
+            binary_path_by_name[service_config.name] = {
+                "exec_path": exec_path,
+                "check": requires_check.lower() == "true",
+            }
+    for cli_config in host.config.clis:
+        requires_check = cli_config.attributes.get("requires_version_check", "true")
+        binary_path_by_name[cli_config.name] = {
+            "exec_path": cli_config.exec_path,
+            "check": requires_check.lower() == "true",
+        }
+
+    shell = host.get_shell()
+    versions_at_host = {}
+    for binary_name, binary in binary_path_by_name.items():
+        try:
+            binary_path = binary["exec_path"]
+            result = shell.exec(f"{binary_path} --version")
+            versions_at_host[binary_name] = {"version": _parse_version(result.stdout), "check": binary["check"]}
+        except Exception as exc:
+            logger.error(f"Cannot get version for {binary_path} because of\n{exc}")
+            versions_at_host[binary_name] = {"version": "Unknown", "check": binary["check"]}
+    versions_by_host[host.config.address] = versions_at_host
+    return versions_by_host
+
+
 def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]:
     versions_by_host = {}
-    for host in hosting.hosts:
-        binary_path_by_name = {}  # Maps binary name to executable path
-        for service_config in host.config.services:
-            exec_path = service_config.attributes.get("exec_path")
-            requires_check = service_config.attributes.get("requires_version_check", "true")
-            version_parameter = service_config.attributes.get("custom_version_parameter", "--version")
-            if exec_path:
-                binary_path_by_name[service_config.name] = {
-                    "exec_path": exec_path,
-                    "check": requires_check.lower() == "true",
-                    "version_parameter": version_parameter,
-                }
-        for cli_config in host.config.clis:
-            requires_check = cli_config.attributes.get("requires_version_check", "true")
-            version_parameter = service_config.attributes.get("custom_version_parameter", "--version")
-            binary_path_by_name[cli_config.name] = {
-                "exec_path": cli_config.exec_path,
-                "check": requires_check.lower() == "true",
-                "version_parameter": version_parameter,
-            }
-
-        shell = host.get_shell()
-        versions_at_host = {}
-        for binary_name, binary in binary_path_by_name.items():
-            try:
-                binary_path = binary["exec_path"]
-                result = shell.exec(f"{binary_path} {binary['version_parameter']}")
-                versions_at_host[binary_name] = {"version": _parse_version(result.stdout), "check": binary["check"]}
-            except Exception as exc:
-                logger.error(f"Cannot get version for {binary_path} because of\n{exc}")
-                versions_at_host[binary_name] = {"version": "Unknown", "check": binary["check"]}
-        versions_by_host[host.config.address] = versions_at_host
+    future_binary_versions = parallel(parallel_binary_versions, parallel_items=hosting.hosts)
+    for future in future_binary_versions:
+        versions_by_host.update(future.result())
 
+    # Consolidate versions across all hosts
+    check_versions = {}
+    exceptions = []
+    inconsistent_names = set()
+    previous_host = None
     versions = {}
+    captured_version = None
     for host, binary_versions in versions_by_host.items():
         for name, binary in binary_versions.items():
-            captured_version = versions.get(name, {}).get("version")
             version = binary["version"]
-            if captured_version:
-                assert captured_version == version, f"Binary {name} has inconsistent version on host {host}"
+            if not check_versions.get(f'{name[:-2]}', None):
+                captured_version = check_versions.get(f'{name[:-2]}', {}).get(host, {}).get(captured_version)
+                check_versions[f'{name[:-2]}'] = {host: {version: name}}
             else:
-                versions[name] = {"version": version, "check": binary["check"]}
-    return versions
-
+                captured_version = list(check_versions.get(f'{name[:-2]}', {}).get(previous_host).keys())[0]
+                check_versions[f'{name[:-2]}'].update({host: {version: name}})
+
+            if captured_version and captured_version != version:
+                inconsistent_names.add(name[:-2])
+
+            versions[name] = {"version": version, "check": binary["check"]}
+        previous_host = host
+    if inconsistent_names:
+        for i in inconsistent_names:
+            for host in versions_by_host.keys():
+                for version, name in check_versions.get(i).get(host).items():
+                    exceptions.append(f'Binary {name} has inconsistent version {version} on host {host}')
+        exceptions.append('\n')
+    return versions, exceptions
 
 def _parse_version(version_output: str) -> str:
     version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE)

From 8ba2cb80308cd588ca2fb360bfbcb7a231be7573 Mon Sep 17 00:00:00 2001
From: mkadilov
Date: Wed, 31 Jan 2024 15:43:24 +0300
Subject: [PATCH 118/274] [#171] Components versions check

Components versions check

Signed-off-by: Mikhail Kadilov m.kadilov@yadro.com
---
 pyproject.toml                           | 10 +++++-----
 src/frostfs_testlib/storage/cluster.py   |  8 ++++----
 src/frostfs_testlib/storage/constants.py |  8 --------
 3 files changed, 9 insertions(+), 17 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 7d3e5b0..74a163e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -51,11 +51,11 @@ basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck"
 config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager"
 
 [project.entry-points."frostfs.testlib.services"]
-s = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode"
-s3-gate = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate"
-http-gate = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate"
-morph-chain = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain"
-ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing"
+frostfs-storage = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode"
+frostfs-s3 = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate"
+frostfs-http = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate"
+neo-go = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain"
+frostfs-ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing"
 
 [tool.isort]
 profile = "black"
diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py
index 313215a..c867515 100644
--- a/src/frostfs_testlib/storage/cluster.py
+++ b/src/frostfs_testlib/storage/cluster.py
@@ -105,7 +105,7 @@ class ClusterNode:
         service_entry = self.class_registry.get_entry(service_type)
         service_name = service_entry["hosting_service_name"]
 
-        pattern = f"{service_name}{self.id:02}"
+        pattern = f"{service_name}_{self.id:02}"
         config = self.host.get_service_config(pattern)
 
         return service_type(
@@ -120,7 +120,7 @@ class ClusterNode:
         svcs_names_on_node = [svc.name for svc in self.host.config.services]
         for entry in self.class_registry._class_mapping.values():
             hosting_svc_name = entry["hosting_service_name"]
entry["hosting_service_name"] - pattern = f"{hosting_svc_name}{self.id:02}" + pattern = f"{hosting_svc_name}_{self.id:02}" if pattern in svcs_names_on_node: config = self.host.get_service_config(pattern) svcs.append( @@ -267,13 +267,13 @@ class Cluster: service_name = service["hosting_service_name"] cls: type[NodeBase] = service["cls"] - pattern = f"{service_name}\d*$" + pattern = f"{service_name}_\d*$" configs = self.hosting.find_service_configs(pattern) found_nodes = [] for config in configs: # config.name is something like s3-gate01. Cut last digits to know service type - service_type = re.findall(".*\D", config.name)[0] + service_type = re.findall("(.*)_\d+", config.name)[0] # exclude unsupported services if service_type != service_name: continue diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 5b9d694..3d75988 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -18,11 +18,3 @@ class ConfigAttributes: UN_LOCODE = "un_locode" HTTP_HOSTNAME = "http_hostname" S3_HOSTNAME = "s3_hostname" - - -class _FrostfsServicesNames: - STORAGE = "s" - S3_GATE = "s3-gate" - HTTP_GATE = "http-gate" - MORPH_CHAIN = "morph-chain" - INNER_RING = "ir" From d79fd87ede254bfd483a25934dfd1c54c6c61201 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 5 Feb 2024 12:41:29 +0300 Subject: [PATCH 119/274] [#174] Add flag to remove registry file Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_config.py | 2 ++ src/frostfs_testlib/load/runners.py | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index f072a4e..1932e69 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -235,6 +235,8 @@ class LoadParams: registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False) # In case if we want to use custom registry file left from another load run custom_registry: Optional[str] = None + # In case if we want to use custom registry file left from another load run + force_fresh_registry: Optional[bool] = None # Specifies the minimum duration of every single execution (i.e. iteration). # Any iterations that are shorter than this value will cause that VU to # sleep for the remainder of the time until the specified minimum duration is reached. 
diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py
index 532c590..d456270 100644
--- a/src/frostfs_testlib/load/runners.py
+++ b/src/frostfs_testlib/load/runners.py
@@ -78,6 +78,10 @@ class DefaultRunner(RunnerBase):
         nodes_under_load: list[ClusterNode],
         k6_dir: str,
     ):
+        if load_params.force_fresh_registry and load_params.custom_registry:
+            with reporter.step("Forcing fresh registry files"):
+                parallel(self._force_fresh_registry, self.loaders, load_params)
+
         if load_params.load_type != LoadType.S3:
             return
 
@@ -88,6 +92,11 @@ class DefaultRunner(RunnerBase):
 
         parallel(self._prepare_loader, self.loaders, load_params, grpc_peer, s3_public_keys, k6_dir)
 
+    def _force_fresh_registry(self, loader: Loader, load_params: LoadParams):
+        with reporter.step(f"Forcing fresh registry on {loader.ip}"):
+            shell = loader.get_shell()
+            shell.exec(f"rm -f {load_params.registry_file}")
+
     def _prepare_loader(
         self,
         loader: Loader,

From 4f3814690e750df0840274c7b4a6a1de0028ebbf Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Mon, 5 Feb 2024 18:49:45 +0300
Subject: [PATCH 120/274] [TrueCloudLab/xk6-frostfs#125] Add acl option

Signed-off-by: Andrey Berezin
---
 src/frostfs_testlib/load/load_config.py |  2 ++
 tests/test_load_config.py               | 13 +++++++++++++
 2 files changed, 15 insertions(+)

diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py
index 1932e69..532be16 100644
--- a/src/frostfs_testlib/load/load_config.py
+++ b/src/frostfs_testlib/load/load_config.py
@@ -147,6 +147,8 @@ class Preset:
     pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False)
     # Workers count for preset
     workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False)
+    # Acl for container/buckets
+    acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False)
 
     # ------ GRPC ------
     # Amount of containers which should be created
diff --git a/tests/test_load_config.py b/tests/test_load_config.py
index 834d051..8f28621 100644
--- a/tests/test_load_config.py
+++ b/tests/test_load_config.py
@@ -136,6 +136,7 @@ class TestLoadConfig:
     def test_argument_parsing_for_grpc_scenario(self, load_params: LoadParams):
         expected_preset_args = [
             "--size '11'",
+            "--acl 'acl'",
             "--preload_obj '13'",
             "--out 'pregen_json'",
             "--workers '7'",
@@ -174,6 +175,7 @@ class TestLoadConfig:
             "--ignore-errors",
             "--sleep '19'",
             "--local",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -211,6 +213,7 @@ class TestLoadConfig:
             "--location 's3_location'",
             "--ignore-errors",
             "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -242,6 +245,7 @@ class TestLoadConfig:
             "--location 's3_location'",
             "--ignore-errors",
             "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 183900,
@@ -279,6 +283,7 @@ class TestLoadConfig:
             "--location 's3_location'",
             "--ignore-errors",
             "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -317,6 +322,7 @@ class TestLoadConfig:
             "--policy 'container_placement_policy'",
             "--ignore-errors",
             "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -347,6 +353,7 @@ class TestLoadConfig:
             "--policy 'container_placement_policy'",
             "--ignore-errors",
             "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "CONFIG_FILE": "config_file",
@@ -399,6 +406,7 @@ class TestLoadConfig:
             "--containers '0'",
             "--policy ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -427,6 +435,7 @@ class TestLoadConfig:
             "--containers '0'",
             "--policy ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -462,6 +471,7 @@ class TestLoadConfig:
             "--buckets '0'",
             "--location ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -490,6 +500,7 @@ class TestLoadConfig:
             "--buckets '0'",
             "--location ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -525,6 +536,7 @@ class TestLoadConfig:
             "--containers '0'",
             "--policy ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -553,6 +565,7 @@ class TestLoadConfig:
             "--containers '0'",
             "--policy ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "CONFIG_FILE": "",

From 751381cd60b909076a371d2d2973a5523a59ca1a Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Wed, 14 Feb 2024 16:16:59 +0300
Subject: [PATCH 121/274] Add GenericCli utility

Signed-off-by: Andrey Berezin
---
 src/frostfs_testlib/cli/__init__.py          |  1 +
 src/frostfs_testlib/cli/generic_cli.py       | 30 ++++++++
 src/frostfs_testlib/hosting/config.py        |  5 +-
 src/frostfs_testlib/hosting/interfaces.py    |  4 +-
 src/frostfs_testlib/steps/cli/container.py   |  6 +-
 src/frostfs_testlib/steps/http/http_gate.py  | 74 +++++++++----------
 .../controllers/cluster_state_controller.py  |  2 +
 .../storage/dataclasses/frostfs_services.py  | 29 ++------
 8 files changed, 80 insertions(+), 71 deletions(-)
 create mode 100644 src/frostfs_testlib/cli/generic_cli.py

diff --git a/src/frostfs_testlib/cli/__init__.py b/src/frostfs_testlib/cli/__init__.py
index 3799be9..7e3d243 100644
--- a/src/frostfs_testlib/cli/__init__.py
+++ b/src/frostfs_testlib/cli/__init__.py
@@ -1,4 +1,5 @@
 from frostfs_testlib.cli.frostfs_adm import FrostfsAdm
 from frostfs_testlib.cli.frostfs_authmate import FrostfsAuthmate
 from frostfs_testlib.cli.frostfs_cli import FrostfsCli
+from frostfs_testlib.cli.generic_cli import GenericCli
 from frostfs_testlib.cli.neogo import NeoGo, NetworkType
diff --git a/src/frostfs_testlib/cli/generic_cli.py b/src/frostfs_testlib/cli/generic_cli.py
new file mode 100644
index 0000000..2a80159
--- /dev/null
+++ b/src/frostfs_testlib/cli/generic_cli.py
@@ -0,0 +1,30 @@
+from typing import Optional
+
+from frostfs_testlib.hosting.interfaces import Host
+from frostfs_testlib.shell.interfaces import CommandOptions, Shell
+
+
+class GenericCli(object):
+    def __init__(self, cli_name: str, host: Host) -> None:
+        self.host = host
+        self.cli_name = cli_name
+
+    def __call__(
+        self,
+        args: Optional[str] = "",
+        pipes: Optional[str] = "",
+        shell: Optional[Shell] = None,
+        options: Optional[CommandOptions] = None,
+    ):
+        if not shell:
+            shell = self.host.get_shell()
+
+        cli_config = self.host.get_cli_config(self.cli_name, True)
+        extra_args = ""
+        exec_path = self.cli_name
+        if cli_config:
+            extra_args = " ".join(cli_config.extra_args)
+            exec_path = cli_config.exec_path
+
+        cmd = f"{exec_path} {args} {extra_args} {pipes}"
+        return shell.exec(cmd, options)
diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py
index 4ab66d7..8b256cc 100644
--- a/src/frostfs_testlib/hosting/config.py
+++ b/src/frostfs_testlib/hosting/config.py
@@ -10,9 +10,7 @@ class ParsedAttributes:
     def parse(cls, attributes: dict[str, Any]):
         # Pick attributes supported by the class
         field_names = set(field.name for field in fields(cls))
-        supported_attributes = {
-            key: value for key, value in attributes.items() if key in field_names
-        }
+        supported_attributes = {key: value for key, value in attributes.items() if key in field_names}
         return cls(**supported_attributes)
 
 
@@ -29,6 +27,7 @@ class CLIConfig:
     name: str
     exec_path: str
     attributes: dict[str, str] = field(default_factory=dict)
+    extra_args: list[str] = field(default_factory=list)
 
 
 @dataclass
diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py
index 3b2d718..13051e2 100644
--- a/src/frostfs_testlib/hosting/interfaces.py
+++ b/src/frostfs_testlib/hosting/interfaces.py
@@ -54,7 +54,7 @@ class Host(ABC):
             raise ValueError(f"Unknown service name: '{service_name}'")
         return service_config
 
-    def get_cli_config(self, cli_name: str) -> CLIConfig:
+    def get_cli_config(self, cli_name: str, allow_empty: bool = False) -> CLIConfig:
         """Returns config of CLI tool with specified name.
 
         The CLI must be located on this host.
@@ -66,7 +66,7 @@ class Host(ABC):
             Config of the CLI tool.
         """
         cli_config = self._cli_config_by_name.get(cli_name)
-        if cli_config is None:
+        if cli_config is None and not allow_empty:
             raise ValueError(f"Unknown CLI name: '{cli_name}'")
         return cli_config
 
diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py
index 3cc3f35..82ff407 100644
--- a/src/frostfs_testlib/steps/cli/container.py
+++ b/src/frostfs_testlib/steps/cli/container.py
@@ -8,7 +8,7 @@ from typing import Optional, Union
 import requests
 
 from frostfs_testlib import reporter
-from frostfs_testlib.cli import FrostfsCli
+from frostfs_testlib.cli import FrostfsCli, GenericCli
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
 from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
 from frostfs_testlib.shell import Shell
@@ -345,8 +345,8 @@ def _parse_cid(output: str) -> str:
 
 @reporter.step("Search container by name")
 def search_container_by_name(name: str, node: ClusterNode):
-    node_shell = node.host.get_shell()
-    output = node_shell.exec(f"curl -I HEAD http://127.0.0.1:8084/{name}")
+    curl = GenericCli("curl", node.host)
+    output = curl(f"-I http://127.0.0.1:8084/{name}")
     pattern = r"X-Container-Id: (\S+)"
     cid = re.findall(pattern, output.stdout)
     if cid:
diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py
index a8c9899..3f4d838 100644
--- a/src/frostfs_testlib/steps/http/http_gate.py
+++ b/src/frostfs_testlib/steps/http/http_gate.py
@@ -11,13 +11,14 @@ from urllib.parse import quote_plus
 import requests
 
 from frostfs_testlib import reporter
+from frostfs_testlib.cli import GenericCli
 from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE
 from frostfs_testlib.s3.aws_cli_client import command_options
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.shell.local_shell import LocalShell
 from frostfs_testlib.steps.cli.object import get_object
 from frostfs_testlib.steps.storage_policy import get_nodes_without_object
-from frostfs_testlib.storage.cluster import StorageNode
+from frostfs_testlib.storage.cluster import ClusterNode, StorageNode
 from frostfs_testlib.testing.test_control import retry
 from frostfs_testlib.utils.file_utils import get_file_hash
 
@@ -31,8 +32,7 @@ local_shell = LocalShell()
 def get_via_http_gate(
     cid: str,
     oid: str,
-    endpoint: str,
-    http_hostname: str,
+    node: ClusterNode,
     request_path: Optional[str] = None,
     timeout: Optional[int] = 300,
 ):
@@ -40,18 +40,19 @@ def get_via_http_gate(
     This function gets given object from HTTP gate
     cid:          container id to get object from
     oid:          object ID
-    endpoint:     http gate endpoint
-    http_hostname: http host name on the node
+    node:         node to make request
     request_path: (optional) http request, if omitted - use default [{endpoint}/get/{cid}/{oid}]
     """
 
     # if `request_path` parameter omitted, use default
     if request_path is None:
-        request = f"{endpoint}/get/{cid}/{oid}"
+        request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}"
     else:
-        request = f"{endpoint}{request_path}"
+        request = f"{node.http_gate.get_endpoint()}{request_path}"
 
-    resp = requests.get(request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False)
+    resp = requests.get(
+        request, headers={"Host": node.storage_node.get_http_hostname()[0]}, stream=True, timeout=timeout, verify=False
+    )
 
     if not resp.ok:
         raise Exception(
@@ -72,15 +73,14 @@ def get_via_http_gate(
 
 
 @reporter.step("Get via Zip HTTP Gate")
-def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300):
+def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Optional[int] = 300):
     """
     This function gets given object from HTTP gate
     cid:      container id to get object from
     prefix:   common prefix
-    endpoint: http gate endpoint
-    http_hostname: http host name on the node
+    node:     node to make request
     """
-    request = f"{endpoint}/zip/{cid}/{prefix}"
+    request = f"{node.http_gate.get_endpoint()}/zip/{cid}/{prefix}"
     resp = requests.get(request, stream=True, timeout=timeout, verify=False)
 
     if not resp.ok:
@@ -109,8 +109,7 @@ def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, http_hostname: s
 def get_via_http_gate_by_attribute(
     cid: str,
     attribute: dict,
-    endpoint: str,
-    http_hostname: str,
+    node: ClusterNode,
     request_path: Optional[str] = None,
     timeout: Optional[int] = 300,
 ):
@@ -126,11 +125,13 @@ def get_via_http_gate_by_attribute(
     attr_value = quote_plus(str(attribute.get(attr_name)))
     # if `request_path` parameter omitted, use default
     if request_path is None:
-        request = f"{endpoint}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
+        request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
     else:
-        request = f"{endpoint}{request_path}"
+        request = f"{node.http_gate.get_endpoint()}{request_path}"
 
-    resp = requests.get(request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname})
+    resp = requests.get(
+        request, stream=True, timeout=timeout, verify=False, headers={"Host": node.storage_node.get_http_hostname()[0]}
+    )
 
     if not resp.ok:
         raise Exception(
@@ -247,19 +248,18 @@ def upload_via_http_gate_curl(
 
 @retry(max_attempts=3, sleep_interval=1)
 @reporter.step("Get via HTTP Gate using Curl")
-def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> str:
+def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> str:
     """
     This function gets given object from HTTP gate using curl utility.
     cid:      CID to get object from
     oid:      object OID
-    endpoint: http gate endpoint
-    http_hostname: http host name of the node
+    node:     node for request
     """
-    request = f"{endpoint}/get/{cid}/{oid}"
+    request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}"
     file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")
 
-    cmd = f'curl -k -H "Host: {http_hostname}" {request} > {file_path}'
-    local_shell.exec(cmd)
+    curl = GenericCli("curl", node.host)
+    curl(f'-k -H "Host: {node.storage_node.get_http_hostname()[0]}"', f"{request} > {file_path}", shell=local_shell)
 
     return file_path
 
@@ -274,12 +274,11 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"):
 def try_to_get_object_and_expect_error(
     cid: str,
     oid: str,
+    node: ClusterNode,
     error_pattern: str,
-    endpoint: str,
-    http_hostname: str,
 ) -> None:
     try:
-        get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname)
+        get_via_http_gate(cid=cid, oid=oid, node=node)
         raise AssertionError(f"Expected error on getting object with cid: {cid}")
     except Exception as err:
         match = error_pattern.casefold() in str(err).casefold()
@@ -292,13 +291,10 @@ def get_object_by_attr_and_verify_hashes(
     file_name: str,
     cid: str,
     attrs: dict,
-    endpoint: str,
-    http_hostname: str,
+    node: ClusterNode,
 ) -> None:
-    got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname)
-    got_file_path_http_attr = get_via_http_gate_by_attribute(
-        cid=cid, attribute=attrs, endpoint=endpoint, http_hostname=http_hostname
-    )
+    got_file_path_http = get_via_http_gate(cid=cid, oid=oid, node=node)
+    got_file_path_http_attr = get_via_http_gate_by_attribute(cid=cid, attribute=attrs, node=node)
     assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr)
 
 
@@ -309,8 +305,7 @@ def verify_object_hash(
     cid: str,
     shell: Shell,
     nodes: list[StorageNode],
-    endpoint: str,
-    http_hostname: str,
+    request_node: ClusterNode,
     object_getter=None,
 ) -> None:
 
@@ -336,7 +331,7 @@ def verify_object_hash(
         shell=shell,
         endpoint=random_node.get_rpc_endpoint(),
     )
-    got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname)
+    got_file_path_http = object_getter(cid=cid, oid=oid, node=request_node)
 
     assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)
 
@@ -365,10 +360,9 @@ def attr_into_str_header_curl(attrs: dict) -> list:
 def try_to_get_object_via_passed_request_and_expect_error(
     cid: str,
     oid: str,
+    node: ClusterNode,
     error_pattern: str,
-    endpoint: str,
     http_request_path: str,
-    http_hostname: str,
     attrs: Optional[dict] = None,
 ) -> None:
     try:
@@ -376,17 +370,15 @@ def try_to_get_object_via_passed_request_and_expect_error(
             get_via_http_gate(
                 cid=cid,
                 oid=oid,
-                endpoint=endpoint,
+                node=node,
                 request_path=http_request_path,
-                http_hostname=http_hostname,
             )
         else:
             get_via_http_gate_by_attribute(
                 cid=cid,
                 attribute=attrs,
-                endpoint=endpoint,
+                node=node,
                 request_path=http_request_path,
-                http_hostname=http_hostname,
             )
         raise AssertionError(f"Expected error on getting object with cid: {cid}")
     except Exception as err:
diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py
index f51be78..69df675 100644
--- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py
+++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py
@@ -326,6 +326,8 @@ class ClusterStateController:
 
     @reporter.step("Restore blocked nodes")
     def restore_all_traffic(self):
+        if not self.dropped_traffic:
+            return
         parallel(self._restore_traffic_to_node, self.dropped_traffic)
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py
index 33e7894..ddc650a 100644
--- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py
+++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py
@@ -5,6 +5,7 @@ from frostfs_testlib.storage.constants import ConfigAttributes
 from frostfs_testlib.storage.dataclasses.node_base import NodeBase
 from frostfs_testlib.storage.dataclasses.shard import Shard
 
+
 class InnerRing(NodeBase):
     """
     Class represents inner ring node in a cluster
@@ -17,11 +18,7 @@ class InnerRing(NodeBase):
 
     def service_healthcheck(self) -> bool:
         health_metric = "frostfs_ir_ir_health"
-        output = (
-            self.host.get_shell()
-            .exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d")
-            .stdout
-        )
+        output = self.host.get_shell().exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d").stdout
         return health_metric in output
 
     def get_netmap_cleaner_threshold(self) -> str:
@@ -50,11 +47,7 @@ class S3Gate(NodeBase):
 
     def service_healthcheck(self) -> bool:
         health_metric = "frostfs_s3_gw_state_health"
-        output = (
-            self.host.get_shell()
-            .exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d")
-            .stdout
-        )
+        output = self.host.get_shell().exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d").stdout
         return health_metric in output
 
     @property
@@ -72,11 +65,7 @@ class HTTPGate(NodeBase):
 
     def service_healthcheck(self) -> bool:
         health_metric = "frostfs_http_gw_state_health"
-        output = (
-            self.host.get_shell()
-            .exec(f"curl -s localhost:5662 | grep {health_metric} | sed 1,2d")
-            .stdout
-        )
+        output = self.host.get_shell().exec(f"curl -s localhost:5662 | grep {health_metric} | sed 1,2d").stdout
         return health_metric in output
 
     @property
@@ -135,11 +124,7 @@ class StorageNode(NodeBase):
 
     def service_healthcheck(self) -> bool:
         health_metric = "frostfs_node_state_health"
-        output = (
-            self.host.get_shell()
-            .exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d")
-            .stdout
-        )
+        output = self.host.get_shell().exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d").stdout
         return health_metric in output
 
     def get_shard_config_path(self) -> str:
@@ -174,10 +159,10 @@ class StorageNode(NodeBase):
     def get_storage_config(self) -> str:
         return self.host.get_storage_config(self.name)
 
-    def get_http_hostname(self) -> str:
+    def get_http_hostname(self) -> list[str]:
         return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME)
 
-    def get_s3_hostname(self) -> str:
+    def get_s3_hostname(self) -> list[str]:
         return self._get_attribute(ConfigAttributes.S3_HOSTNAME)
 
     def delete_blobovnicza(self):

From 55cebc042c49a59f699136d9258be6682ef0fbff Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Mon, 19 Feb 2024 17:48:09 +0300
Subject: [PATCH 122/274] [#183] Read all configuration files for service
 config

Signed-off-by: Andrey Berezin
---
 src/frostfs_testlib/storage/cluster.py        |  5 +-
 .../configuration/service_configuration.py    | 75 ++++++++++++-------
 .../storage/dataclasses/frostfs_services.py   | 18 ++---
 .../storage/dataclasses/node_base.py          |  9 ++-
 .../storage/dataclasses/shard.py              | 13 +---
 5 files changed, 68 insertions(+), 52 deletions(-)

diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py
index c867515..23130cb 100644
--- a/src/frostfs_testlib/storage/cluster.py
+++ b/src/frostfs_testlib/storage/cluster.py
@@ -9,7 +9,6 @@ from frostfs_testlib.hosting import Host, Hosting
 from frostfs_testlib.hosting.config import ServiceConfig
 from frostfs_testlib.storage import get_service_registry
 from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
-from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration
 from frostfs_testlib.storage.constants import ConfigAttributes
 from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
 from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
@@ -72,6 +71,7 @@ class ClusterNode:
     def s3_gate(self) -> S3Gate:
         return self.service(S3Gate)
 
+    # TODO: Deprecated. Use config with ServiceConfigurationYml interface
     def get_config(self, config_file_path: str) -> dict:
         shell = self.host.get_shell()
 
@@ -81,6 +81,7 @@ class ClusterNode:
         config = yaml.safe_load(config_text)
         return config
 
+    # TODO: Deprecated. Use config with ServiceConfigurationYml interface
     def save_config(self, new_config: dict, config_file_path: str) -> None:
         shell = self.host.get_shell()
 
@@ -88,7 +89,7 @@ class ClusterNode:
         shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}")
 
     def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml:
-        return ServiceConfiguration(self.service(service_type))
+        return self.service(service_type).config
 
     def service(self, service_type: type[ServiceClass]) -> ServiceClass:
         """
diff --git a/src/frostfs_testlib/storage/configuration/service_configuration.py b/src/frostfs_testlib/storage/configuration/service_configuration.py
index f7b3be7..fddd64a 100644
--- a/src/frostfs_testlib/storage/configuration/service_configuration.py
+++ b/src/frostfs_testlib/storage/configuration/service_configuration.py
@@ -5,51 +5,74 @@ from typing import Any
 import yaml
 
 from frostfs_testlib import reporter
-from frostfs_testlib.shell.interfaces import CommandOptions
+from frostfs_testlib.shell.interfaces import CommandOptions, Shell
 from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
-from frostfs_testlib.storage.dataclasses.node_base import ServiceClass
+
+
+def extend_dict(extend_me: dict, extend_by: dict):
+    if isinstance(extend_by, dict):
+        for k, v in extend_by.items():
+            if k in extend_me:
+                extend_dict(extend_me.get(k), v)
+            else:
+                extend_me[k] = v
+    else:
+        extend_me += extend_by
 
 
 class ServiceConfiguration(ServiceConfigurationYml):
-    def __init__(self, service: "ServiceClass") -> None:
-        self.service = service
-        self.shell = self.service.host.get_shell()
-        self.confd_path = os.path.join(self.service.config_dir, "conf.d")
+    def __init__(self, service_name: str, shell: Shell, config_dir: str, main_config_path: str) -> None:
+        self.service_name = service_name
+        self.shell = shell
+        self.main_config_path = main_config_path
+        self.confd_path = os.path.join(config_dir, "conf.d")
         self.custom_file = os.path.join(self.confd_path, "99_changes.yml")
 
     def _path_exists(self, path: str) -> bool:
         return not self.shell.exec(f"test -e {path}", options=CommandOptions(check=False)).return_code
 
-    def _get_data_from_file(self, path: str) -> dict:
-        content = self.shell.exec(f"cat {path}").stdout
-        data = yaml.safe_load(content)
-        return data
+    def _get_config_files(self):
+        config_files = [self.main_config_path]
 
-    def get(self, key: str) -> str:
-        with reporter.step(f"Get {key} configuration value for {self.service}"):
-            config_files = [self.service.main_config_path]
+        if self._path_exists(self.confd_path):
+            files = self.shell.exec(f"find {self.confd_path} -type f").stdout.strip().split()
+            # Sorting files in backwards order from latest to first one
+            config_files.extend(sorted(files, key=lambda x: -int(re.findall("^\d+", os.path.basename(x))[0])))
 
-            if self._path_exists(self.confd_path):
-                files = self.shell.exec(f"find {self.confd_path} -type f").stdout.strip().split()
-                # Sorting files in backwards order from latest to first one
-                config_files.extend(sorted(files, key=lambda x: -int(re.findall("^\d+", os.path.basename(x))[0])))
+        return config_files
 
-            result = None
-            for file in files:
-                data = self._get_data_from_file(file)
-                result = self._find_option(key, data)
-                if result is not None:
-                    break
+    def _get_configuration(self, config_files: list[str]) -> dict:
+        if not config_files:
+            return {}
 
+        splitter = "+++++"
+        files_str = " ".join(config_files)
+        all_content = self.shell.exec(
+            f"echo Getting config files; for file in {files_str}; do (echo {splitter}; sudo cat ${{file}}); done"
+        ).stdout
+        files_content = all_content.split(splitter)[1:]
+        files_data = [yaml.safe_load(file_content) for file_content in files_content]
+
+        merged_data = {}
+        for data in files_data:
+            extend_dict(merged_data, data)
+
+        return merged_data
+
+    def get(self, key: str) -> str | Any:
+        with reporter.step(f"Get {key} configuration value for {self.service_name}"):
+            config_files = self._get_config_files()
+            configuration = self._get_configuration(config_files)
+            result = self._find_option(key, configuration)
             return result
 
     def set(self, values: dict[str, Any]):
-        with reporter.step(f"Change configuration for {self.service}"):
+        with reporter.step(f"Change configuration for {self.service_name}"):
             if not self._path_exists(self.confd_path):
                 self.shell.exec(f"mkdir {self.confd_path}")
 
             if self._path_exists(self.custom_file):
-                data = self._get_data_from_file(self.custom_file)
+                data = self._get_configuration([self.custom_file])
             else:
                 data = {}
 
@@ -61,5 +84,5 @@ class ServiceConfiguration(ServiceConfigurationYml):
             self.shell.exec(f"chmod 777 {self.custom_file}")
 
     def revert(self):
-        with reporter.step(f"Revert changed options for {self.service}"):
+        with reporter.step(f"Revert changed options for {self.service_name}"):
             self.shell.exec(f"rm -rf {self.custom_file}")
diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py
index ddc650a..9e671d5 100644
--- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py
+++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py
@@ -127,25 +127,23 @@ class StorageNode(NodeBase):
         output = self.host.get_shell().exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d").stdout
         return health_metric in output
 
+    # TODO: Deprecated. Use new approach with config
    def get_shard_config_path(self) -> str:
         return self._get_attribute(ConfigAttributes.SHARD_CONFIG_PATH)
+    # TODO: Deprecated. Use new approach with config
     def get_shards_config(self) -> tuple[str, dict]:
         return self.get_config(self.get_shard_config_path())
 
     def get_shards(self) -> list[Shard]:
-        config = self.get_shards_config()[1]
-        config["storage"]["shard"].pop("default")
-        return [Shard.from_object(shard) for shard in config["storage"]["shard"].values()]
+        shards = self.config.get("storage:shard")
 
-    def get_shards_from_env(self) -> list[Shard]:
-        config = self.get_shards_config()[1]
-        configObj = ConfigObj(StringIO(config))
+        if not shards:
+            raise RuntimeError(f"Cannot get shards information for {self.name} on {self.host.config.address}")
 
-        pattern = f"{SHARD_PREFIX}\d*"
-        num_shards = len(set(re.findall(pattern, self.get_shards_config())))
-
-        return [Shard.from_config_object(configObj, shard_id) for shard_id in range(num_shards)]
+        if "default" in shards:
+            shards.pop("default")
+        return [Shard.from_object(shard) for shard in shards.values()]
 
     def get_control_endpoint(self) -> str:
         return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT)
diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py
index 72b12a9..8291345 100644
--- a/src/frostfs_testlib/storage/dataclasses/node_base.py
+++ b/src/frostfs_testlib/storage/dataclasses/node_base.py
@@ -10,6 +10,7 @@ from frostfs_testlib import reporter
 from frostfs_testlib.hosting.config import ServiceConfig
 from frostfs_testlib.hosting.interfaces import Host
 from frostfs_testlib.shell.interfaces import CommandResult
+from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration, ServiceConfigurationYml
 from frostfs_testlib.storage.constants import ConfigAttributes
 from frostfs_testlib.testing.readable import HumanReadableABC
 from frostfs_testlib.utils import wallet_utils
@@ -147,7 +148,11 @@ class NodeBase(HumanReadableABC):
     def main_config_path(self) -> str:
         return self._get_attribute(ConfigAttributes.CONFIG_PATH)
 
-    # TODO: Deprecated
+    @property
+    def config(self) -> ServiceConfigurationYml:
+        return ServiceConfiguration(self.name, self.host.get_shell(), self.config_dir, self.main_config_path)
+
+    # TODO: Deprecated. Use config with ServiceConfigurationYml interface
     def get_config(self, config_file_path: Optional[str] = None) -> tuple[str, dict]:
         if config_file_path is None:
             config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)
@@ -160,7 +165,7 @@ class NodeBase(HumanReadableABC):
         config = yaml.safe_load(config_text)
         return config_file_path, config
 
-    # TODO: Deprecated
+    # TODO: Deprecated. Use config with ServiceConfigurationYml interface
     def save_config(self, new_config: dict, config_file_path: Optional[str] = None) -> None:
         if config_file_path is None:
             config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)
diff --git a/src/frostfs_testlib/storage/dataclasses/shard.py b/src/frostfs_testlib/storage/dataclasses/shard.py
index 584138d..170a477 100644
--- a/src/frostfs_testlib/storage/dataclasses/shard.py
+++ b/src/frostfs_testlib/storage/dataclasses/shard.py
@@ -1,16 +1,6 @@
-import json
-import pathlib
-import re
 from dataclasses import dataclass
-from io import StringIO
 
-import allure
-import pytest
-import yaml
 from configobj import ConfigObj
-from frostfs_testlib.cli import FrostfsCli
-from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
-from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
 
 SHARD_PREFIX = "FROSTFS_STORAGE_SHARD_"
 BLOBSTOR_PREFIX = "_BLOBSTOR_"
@@ -94,6 +84,5 @@ class Shard:
             blobstor=[Blobstor(path=blobstor["path"], path_type=blobstor["type"]) for blobstor in shard["blobstor"]],
             metabase=metabase,
             writecache=writecache,
-            pilorama=pilorama
+            pilorama=pilorama,
         )
-

From 273f0d13a52c63f45eda5a4c92df01b8ce76a309 Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Tue, 20 Feb 2024 13:27:45 +0300
Subject: [PATCH 123/274] [#184] Add streaming param

Signed-off-by: Andrey Berezin
---
 src/frostfs_testlib/load/load_config.py |  2 ++
 tests/test_load_config.py               | 13 +++++++++++++
 2 files changed, 15 insertions(+)

diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py
index 532be16..7bde399 100644
--- a/src/frostfs_testlib/load/load_config.py
+++ b/src/frostfs_testlib/load/load_config.py
@@ -221,6 +221,8 @@ class LoadParams:
     )
     # Percentage of filling of all data disks on all nodes
     fill_percent: Optional[float] = None
+    # if set, the payload is generated on the fly and is not read into memory fully.
+    streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False)
 
     # ------- COMMON SCENARIO PARAMS -------
     # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value.
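To make the intent of such metadata_field declarations concrete, here is a sketch (not part of the patch; it assumes LoadParams can be built with keyword arguments and that get_env_vars() is the accessor collecting fields declared with an env variable name, as the test hunks below exercise through their expected_env_vars dictionaries):

    from frostfs_testlib.load.load_config import LoadParams, LoadType

    load_params = LoadParams(load_type=LoadType.S3)  # hypothetical construction
    load_params.streaming = 9

    # streaming has no preset CLI flag, so it should surface only as a k6 env variable
    assert load_params.get_env_vars()["STREAMING"] == 9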
diff --git a/tests/test_load_config.py b/tests/test_load_config.py
index 8f28621..dc019b7 100644
--- a/tests/test_load_config.py
+++ b/tests/test_load_config.py
@@ -156,6 +156,7 @@ class TestLoadConfig:
             "READERS": 7,
             "DELETERS": 8,
             "READ_AGE": 8,
+            "STREAMING": 9,
             "PREGEN_JSON": "pregen_json",
             "PREPARE_LOCALLY": True,
         }
@@ -195,6 +196,7 @@ class TestLoadConfig:
             "READ_RATE": 9,
             "READ_AGE": 8,
             "DELETE_RATE": 11,
+            "STREAMING": 9,
             "PREPARE_LOCALLY": True,
         }
@@ -225,6 +227,7 @@ class TestLoadConfig:
             "READERS": 7,
             "DELETERS": 8,
             "READ_AGE": 8,
+            "STREAMING": 9,
             "NO_VERIFY_SSL": True,
             "PREGEN_JSON": "pregen_json",
         }
@@ -265,6 +268,7 @@ class TestLoadConfig:
             "WRITE_RATE": 10,
             "READ_RATE": 9,
             "READ_AGE": 8,
+            "STREAMING": 9,
             "DELETE_RATE": 11,
         }
@@ -303,6 +307,7 @@ class TestLoadConfig:
             "WRITE_RATE": 10,
             "READ_RATE": 9,
             "READ_AGE": 8,
+            "STREAMING": 9,
             "DELETE_RATE": 11,
         }
@@ -335,6 +340,7 @@ class TestLoadConfig:
             "READERS": 7,
             "DELETERS": 8,
             "READ_AGE": 8,
+            "STREAMING": 9,
             "PREGEN_JSON": "pregen_json",
         }
@@ -366,6 +372,7 @@ class TestLoadConfig:
             "READERS": 7,
             "DELETERS": 8,
             "READ_AGE": 8,
+            "STREAMING": 9,
             "PREGEN_JSON": "pregen_json",
         }
@@ -418,6 +425,7 @@ class TestLoadConfig:
             "READERS": 0,
             "DELETERS": 0,
             "READ_AGE": 0,
+            "STREAMING": 0,
             "PREGEN_JSON": "",
             "PREPARE_LOCALLY": False,
         }
@@ -455,6 +463,7 @@ class TestLoadConfig:
             "READ_RATE": 0,
             "DELETE_RATE": 0,
             "READ_AGE": 0,
+            "STREAMING": 0,
             "PREPARE_LOCALLY": False,
         }
@@ -483,6 +492,7 @@ class TestLoadConfig:
             "READERS": 0,
             "DELETERS": 0,
             "READ_AGE": 0,
+            "STREAMING": 0,
             "NO_VERIFY_SSL": False,
             "PREGEN_JSON": "",
         }
@@ -521,6 +531,7 @@ class TestLoadConfig:
             "READ_RATE": 0,
             "DELETE_RATE": 0,
             "READ_AGE": 0,
+            "STREAMING": 0,
         }
 
         self._check_preset_params(load_params, expected_preset_args)
@@ -549,6 +560,7 @@ class TestLoadConfig:
             "READERS": 0,
             "DELETERS": 0,
             "READ_AGE": 0,
+            "STREAMING": 0,
             "PREGEN_JSON": "",
         }
@@ -578,6 +590,7 @@ class TestLoadConfig:
             "READERS": 0,
             "DELETERS": 0,
             "READ_AGE": 0,
+            "STREAMING": 0,
             "PREGEN_JSON": "",
         }

From 3fc3eaadf32eeaf6af65e4ff673582ad9412cb8c Mon Sep 17 00:00:00 2001
From: mkadilov
Date: Mon, 19 Feb 2024 13:01:29 +0300
Subject: [PATCH 124/274] [#182] Refactoring old functions for FrostfsCli

Refactoring old functions for FrostfsCli

Signed-off-by: Mikhail Kadilov m.kadilov@yadro.com
---
 .../cli/frostfs_cli/control.py                |  29 +++-
 src/frostfs_testlib/cli/frostfs_cli/object.py |   2 +-
 src/frostfs_testlib/cli/frostfs_cli/shards.py |  10 +-
 src/frostfs_testlib/steps/node_management.py  | 137 +++++++++++-------
 .../storage/controllers/shards_watcher.py     |   6 +-
 5 files changed, 121 insertions(+), 63 deletions(-)

diff --git a/src/frostfs_testlib/cli/frostfs_cli/control.py b/src/frostfs_testlib/cli/frostfs_cli/control.py
index bfcd6ec..2cddfdf 100644
--- a/src/frostfs_testlib/cli/frostfs_cli/control.py
+++ b/src/frostfs_testlib/cli/frostfs_cli/control.py
@@ -39,14 +39,12 @@ class FrostfsCliControl(CliCommand):
         address: Optional[str] = None,
         timeout: Optional[str] = None,
     ) -> CommandResult:
-        """Set status of the storage node in FrostFS network map
+        """Health check for FrostFS storage nodes
 
         Args:
             wallet: Path to the wallet or binary key
             address: Address of wallet account
            endpoint: Remote node control address (as 'multiaddr' or ':')
-            force: Force turning to local maintenance
-            status: New netmap status keyword ('online', 'offline', 'maintenance')
             timeout: Timeout for an operation (default 15s)
 
         Returns:
@@ -56,3 +54,28 @@ class FrostfsCliControl(CliCommand):
             "control healthcheck",
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
+
+    def drop_objects(
+        self,
+        endpoint: str,
+        objects: str,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """Drop objects from the node's local storage
+
+        Args:
+            wallet: Path to the wallet or binary key
+            address: Address of wallet account
+            endpoint: Remote node control address (as 'multiaddr' or ':')
+            objects: List of object addresses to be removed in string format
+            timeout: Timeout for an operation (default 15s)
+
+        Returns:
+            Command`s result.
+        """
+        return self._execute(
+            "control drop-objects",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
\ No newline at end of file
diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py
index 0e4654b..38a69e4 100644
--- a/src/frostfs_testlib/cli/frostfs_cli/object.py
+++ b/src/frostfs_testlib/cli/frostfs_cli/object.py
@@ -357,7 +357,7 @@ class FrostfsCliObject(CliCommand):
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional = None,
+        generate_key: Optional[bool] = None,
         oid: Optional[str] = None,
         trace: bool = False,
         root: bool = False,
diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py
index 1727249..4399b13 100644
--- a/src/frostfs_testlib/cli/frostfs_cli/shards.py
+++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py
@@ -39,10 +39,10 @@ class FrostfsCliShards(CliCommand):
     def set_mode(
         self,
         endpoint: str,
-        wallet: str,
-        wallet_password: str,
         mode: str,
         id: Optional[list[str]],
+        wallet: Optional[str] = None,
+        wallet_password: Optional[str] = None,
         address: Optional[str] = None,
         all: bool = False,
         clear_errors: bool = False,
@@ -65,6 +65,11 @@ class FrostfsCliShards(CliCommand):
         Returns:
             Command's result.
         """
+        if not wallet_password:
+            return self._execute(
+                "control shards set-mode",
+                **{param: value for param, value in locals().items() if param not in ["self"]},
+            )
         return self._execute_with_password(
             "control shards set-mode",
             wallet_password,
@@ -137,3 +142,4 @@ class FrostfsCliShards(CliCommand):
             wallet_password,
             **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
         )
+
diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py
index 28e3820..dd38279 100644
--- a/src/frostfs_testlib/steps/node_management.py
+++ b/src/frostfs_testlib/steps/node_management.py
@@ -13,7 +13,6 @@ from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align
 from frostfs_testlib.storage.cluster import Cluster, StorageNode
-from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
 from frostfs_testlib.utils import datetime_utils
 
 logger = logging.getLogger("NeoLogger")
@@ -52,9 +51,24 @@ def storage_node_healthcheck(node: StorageNode) -> HealthStatus:
     Returns:
         health status as HealthStatus object.
     """
-    command = "control healthcheck"
-    output = _run_control_command_with_retries(node, command)
-    return HealthStatus.from_stdout(output)
+
+    host = node.host
+    service_config = host.get_service_config(node.name)
+    wallet_path = service_config.attributes["wallet_path"]
+    wallet_password = service_config.attributes["wallet_password"]
+    control_endpoint = service_config.attributes["control_endpoint"]
+
+    shell = host.get_shell()
+    wallet_config_path = f"/tmp/{node.name}-config.yaml"
+    wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
+    shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
+
+    cli_config = host.get_cli_config("frostfs-cli")
+
+    cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
+    result = cli.control.healthcheck(control_endpoint)
+
+    return HealthStatus.from_stdout(result.stdout)
 
 
 @reporter.step("Set status for {node}")
@@ -66,8 +80,21 @@ def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) ->
         status: online or offline.
         retries (optional, int): number of retry attempts if it didn't work from the first time
     """
-    command = f"control set-status --status {status}"
-    _run_control_command_with_retries(node, command, retries)
+    host = node.host
+    service_config = host.get_service_config(node.name)
+    wallet_path = service_config.attributes["wallet_path"]
+    wallet_password = service_config.attributes["wallet_password"]
+    control_endpoint = service_config.attributes["control_endpoint"]
+
+    shell = host.get_shell()
+    wallet_config_path = f"/tmp/{node.name}-config.yaml"
+    wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
+    shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
+
+    cli_config = host.get_cli_config("frostfs-cli")
+
+    cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
+    cli.control.set_status(control_endpoint, status)
 
 
 @reporter.step("Get netmap snapshot")
@@ -91,7 +118,7 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
 
 
 @reporter.step("Get shard list for {node}")
-def node_shard_list(node: StorageNode) -> list[str]:
+def node_shard_list(node: StorageNode, json: Optional[bool] = None) -> list[str]:
     """
     The function returns list of shards for specified storage node.
     Args:
@@ -99,31 +126,72 @@ def node_shard_list(node: StorageNode) -> list[str]:
     Returns:
         list of shards.
     """
-    command = "control shards list"
-    output = _run_control_command_with_retries(node, command)
-    return re.findall(r"Shard (.*):", output)
+    host = node.host
+    service_config = host.get_service_config(node.name)
+    wallet_path = service_config.attributes["wallet_path"]
+    wallet_password = service_config.attributes["wallet_password"]
+    control_endpoint = service_config.attributes["control_endpoint"]
+
+    shell = host.get_shell()
+    wallet_config_path = f"/tmp/{node.name}-config.yaml"
+    wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
+    shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
+
+    cli_config = host.get_cli_config("frostfs-cli")
+
+    cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
+    result = cli.shards.list(endpoint=control_endpoint, json_mode=json)
+
+    return re.findall(r"Shard (.*):", result.stdout)
 
 
 @reporter.step("Shard set for {node}")
-def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str:
+def node_shard_set_mode(node: StorageNode, shard: list[str], mode: str) -> None:
     """
     The function sets mode for specified shard.
     Args:
         node: node on which shard mode should be set.
     """
-    command = f"control shards set-mode --id {shard} --mode {mode}"
-    return _run_control_command_with_retries(node, command)
+    host = node.host
+    service_config = host.get_service_config(node.name)
+    wallet_path = service_config.attributes["wallet_path"]
+    wallet_password = service_config.attributes["wallet_password"]
+    control_endpoint = service_config.attributes["control_endpoint"]
+
+    shell = host.get_shell()
+    wallet_config_path = f"/tmp/{node.name}-config.yaml"
+    wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
+    shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
+
+    cli_config = host.get_cli_config("frostfs-cli")
+
+    cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
+    cli.shards.set_mode(endpoint=control_endpoint, mode=mode, id=shard)
 
 
 @reporter.step("Drop object from {node}")
-def drop_object(node: StorageNode, cid: str, oid: str) -> str:
+def drop_object(node: StorageNode, cid: str, oid: str) -> None:
     """
     The function drops object from specified node.
     Args:
-        node_id str: node from which object should be dropped.
+        node: node from which object should be dropped.
     """
-    command = f"control drop-objects -o {cid}/{oid}"
-    return _run_control_command_with_retries(node, command)
+    host = node.host
+    service_config = host.get_service_config(node.name)
+    wallet_path = service_config.attributes["wallet_path"]
+    wallet_password = service_config.attributes["wallet_password"]
+    control_endpoint = service_config.attributes["control_endpoint"]
+
+    shell = host.get_shell()
+    wallet_config_path = f"/tmp/{node.name}-config.yaml"
+    wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
+    shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
+
+    cli_config = host.get_cli_config("frostfs-cli")
+
+    cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
+    objects = f"{cid}/{oid}"
+    cli.control.drop_objects(control_endpoint, objects)
 
 
 @reporter.step("Delete data from host for node {node}")
@@ -238,38 +306,3 @@ def remove_nodes_from_map_morph(
         config_file=FROSTFS_ADM_CONFIG_PATH,
     )
     frostfsadm.morph.remove_nodes(node_netmap_keys)
-
-
-def _run_control_command_with_retries(node: StorageNode, command: str, retries: int = 0) -> str:
-    for attempt in range(1 + retries):  # original attempt + specified retries
-        try:
-            return _run_control_command(node, command)
-        except AssertionError as err:
-            if attempt < retries:
-                logger.warning(f"Command {command} failed with error {err} and will be retried")
-                continue
-            raise AssertionError(f"Command {command} failed with error {err}") from err
-
-
-def _run_control_command(node: StorageNode, command: str) -> None:
-    host = node.host
-
-    service_config = host.get_service_config(node.name)
-    wallet_path = service_config.attributes["wallet_path"]
-    wallet_password = service_config.attributes["wallet_password"]
-    control_endpoint = service_config.attributes["control_endpoint"]
-
-    shell = host.get_shell()
-    wallet_config_path = f"/tmp/{node.name}-config.yaml"
-    wallet_config = f'password: "{wallet_password}"'
-    shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
-
-    cli_config = host.get_cli_config("frostfs-cli")
-
-    # TODO: implement cli.control
-    # cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
-    result = shell.exec(
-        f"{cli_config.exec_path} {command} --endpoint {control_endpoint} "
-        f"--wallet {wallet_path} --config {wallet_config_path}"
-    )
-    return result.stdout
diff --git a/src/frostfs_testlib/storage/controllers/shards_watcher.py b/src/frostfs_testlib/storage/controllers/shards_watcher.py
index 95a419e..ad07ff4 100644
--- a/src/frostfs_testlib/storage/controllers/shards_watcher.py
+++ b/src/frostfs_testlib/storage/controllers/shards_watcher.py
@@ -97,8 +97,6 @@ class ShardsWatcher:
 
         response = shards_cli.list(
             endpoint=self.storage_node.get_control_endpoint(),
-            wallet=self.storage_node.get_remote_wallet_path(),
-            wallet_password=self.storage_node.get_wallet_password(),
             json_mode=True,
         )
 
@@ -110,9 +108,7 @@ class ShardsWatcher:
             self.storage_node.host.get_cli_config("frostfs-cli").exec_path,
         )
         return shards_cli.set_mode(
-            self.storage_node.get_control_endpoint(),
-            self.storage_node.get_remote_wallet_path(),
-            self.storage_node.get_wallet_password(),
+            endpoint=self.storage_node.get_control_endpoint(),
             mode=mode,
             id=[shard_id],
             clear_errors=clear_errors,

From f5a7ff5c90e4c67c394881f7405e57104d464aef Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Wed, 21 Feb 2024 17:59:15 +0300
Subject: [PATCH 125/274] [#185] Add prometheus load parameters

---
 src/frostfs_testlib/load/k6.py          | 32 +++++++++++++++---------
 src/frostfs_testlib/load/load_config.py | 33 ++++++++++++++++++++++++-
 tests/test_load_config.py               | 15 ++++++++++-
 3 files changed, 67 insertions(+), 13 deletions(-)

diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py
index 38167d2..1e98b98 100644
--- a/src/frostfs_testlib/load/k6.py
+++ b/src/frostfs_testlib/load/k6.py
@@ -61,7 +61,7 @@ class K6:
         self._k6_dir: str = k6_dir
 
         command = (
-            f"{self._k6_dir}/k6 run {self._generate_env_variables()} "
+            f"{self._generate_env_variables()}{self._k6_dir}/k6 run {self._generate_k6_variables()} "
            f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js"
         )
         user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None
@@ -75,12 +75,12 @@ class K6:
     def _get_fill_percents(self):
         fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs").stdout.split("\n")
         return [line.split() for line in fill_percents][:-1]
-    
+
     def check_fill_percent(self):
         fill_percents = self._get_fill_percents()
         percent_mean = 0
         for line in fill_percents:
-            percent_mean += float(line[1].split('%')[0])
+            percent_mean += float(line[1].split("%")[0])
         percent_mean = percent_mean / len(fill_percents)
         logger.info(f"{self.loader.ip} mean fill percent is {percent_mean}")
         return percent_mean >= self.load_params.fill_percent
@@ -125,9 +125,9 @@ class K6:
         self.preset_output = result.stdout.strip("\n")
         return self.preset_output
 
-    @reporter.step("Generate K6 command")
-    def _generate_env_variables(self) -> str:
-        env_vars = self.load_params.get_env_vars()
+    @reporter.step("Generate K6 variables")
+    def _generate_k6_variables(self) -> str:
+        env_vars = self.load_params.get_k6_vars()
 
         env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints)
         env_vars["SUMMARY_JSON"] = self.summary_json
@@ -135,6 +135,14 @@ class K6:
         reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables")
         return " ".join([f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None])
 
+    @reporter.step("Generate env variables")
+    def _generate_env_variables(self) -> str:
+        env_vars = self.load_params.get_env_vars()
+        if not env_vars:
+            return ""
+        reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "ENV variables")
+        return " ".join([f"{param}='{value}'" for param, value in env_vars.items() if value is not None]) + " "
+
     def get_start_time(self)
-> datetime: return datetime.fromtimestamp(self._k6_process.start_time()) @@ -188,23 +196,25 @@ class K6: wait_interval = min_wait_interval if self._k6_process is None: assert "No k6 instances were executed" - + while timeout > 0: if not self.load_params.fill_percent is None: with reporter.step(f"Check the percentage of filling of all data disks on the node"): if self.check_fill_percent(): - logger.info(f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%") + logger.info( + f"Stopping load because disks are filled more than {self.load_params.fill_percent}%" + ) event.set() self.stop() return - + if event.is_set(): self.stop() return - + if not self._k6_process.running(): return - + remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else "" remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else "" logger.info( diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 7bde399..b859971 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -94,16 +94,18 @@ def metadata_field( string_repr: Optional[bool] = True, distributed: Optional[bool] = False, formatter: Optional[Callable] = None, + env_variable: Optional[str] = None, ): return field( default=None, metadata={ "applicable_scenarios": applicable_scenarios, "preset_argument": preset_param, - "env_variable": scenario_variable, + "scenario_variable": scenario_variable, "string_repr": string_repr, "distributed": distributed, "formatter": formatter, + "env_variable": env_variable, }, ) @@ -172,6 +174,20 @@ class Preset: local: Optional[bool] = metadata_field(grpc_preset_scenarios, "local", None, False) +@dataclass +class PrometheusParams: + # Prometheus server URL + server_url: Optional[str] = metadata_field( + all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False + ) + # Prometheus trend stats + trend_stats: Optional[str] = metadata_field( + all_load_scenarios, env_variable="K6_PROMETHEUS_RW_TREND_STATS", string_repr=False + ) + # Additional tags + metrics_tags: Optional[str] = metadata_field(all_load_scenarios, None, "METRIC_TAGS", False) + + @dataclass class LoadParams: # ------- CONTROL PARAMS ------- @@ -223,6 +239,10 @@ class LoadParams: fill_percent: Optional[float] = None # if set, the payload is generated on the fly and is not read into memory fully. streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False) + # Output format + output: Optional[str] = metadata_field(all_load_scenarios, None, "K6_OUT", False) + # Prometheus params + prometheus: Optional[PrometheusParams] = None # ------- COMMON SCENARIO PARAMS ------- # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value.
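For illustration, a minimal sketch of how the two variable kinds above compose into the final k6 invocation. Scenario variables from get_k6_vars() become -e flags, while variables from get_env_vars() are prefixed to the process environment; the paths and values below are hypothetical, and the real command is assembled by _generate_env_variables() and _generate_k6_variables() as shown in the hunks above:

env_vars = {"K6_PROMETHEUS_RW_SERVER_URL": "http://prometheus.local:9090/api/v1/write"}  # process env, from get_env_vars()
k6_vars = {"DURATION": 300, "WRITERS": 8, "K6_OUT": "xk6-prometheus-rw"}  # scenario vars, from get_k6_vars()

env_prefix = " ".join(f"{param}='{value}'" for param, value in env_vars.items()) + " "
k6_flags = " ".join(f"-e {param}='{value}'" for param, value in k6_vars.items())
command = f"{env_prefix}/opt/k6/k6 run {k6_flags} /opt/k6/scenarios/grpc.js"
# K6_PROMETHEUS_RW_SERVER_URL='http://prometheus.local:9090/api/v1/write' /opt/k6/k6 run -e DURATION='300' -e WRITERS='8' -e K6_OUT='xk6-prometheus-rw' /opt/k6/scenarios/grpc.js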
@@ -339,6 +359,17 @@ class LoadParams: if self.preset: self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json") + def get_k6_vars(self): + env_vars = { + meta_field.metadata["scenario_variable"]: meta_field.value + for meta_field in self._get_meta_fields(self) + if self.scenario in meta_field.metadata["applicable_scenarios"] + and meta_field.metadata["scenario_variable"] + and meta_field.value is not None + } + + return env_vars + def get_env_vars(self): env_vars = { meta_field.metadata["env_variable"]: meta_field.value diff --git a/tests/test_load_config.py b/tests/test_load_config.py index dc019b7..62339f6 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -157,6 +157,7 @@ class TestLoadConfig: "DELETERS": 8, "READ_AGE": 8, "STREAMING": 9, + "K6_OUT": "output", "PREGEN_JSON": "pregen_json", "PREPARE_LOCALLY": True, } @@ -181,6 +182,7 @@ class TestLoadConfig: expected_env_vars = { "DURATION": 9, "WRITE_OBJ_SIZE": 11, + "K6_OUT": "output", "REGISTRY_FILE": "registry_file", "K6_MIN_ITERATION_DURATION": "min_iteration_duration", "K6_SETUP_TIMEOUT": "setup_timeout", @@ -221,6 +223,7 @@ class TestLoadConfig: "DURATION": 9, "WRITE_OBJ_SIZE": 11, "REGISTRY_FILE": "registry_file", + "K6_OUT": "output", "K6_MIN_ITERATION_DURATION": "min_iteration_duration", "K6_SETUP_TIMEOUT": "setup_timeout", "WRITERS": 7, @@ -254,6 +257,7 @@ class TestLoadConfig: "DURATION": 183900, "WRITE_OBJ_SIZE": 11, "REGISTRY_FILE": "registry_file", + "K6_OUT": "output", "K6_MIN_ITERATION_DURATION": "min_iteration_duration", "K6_SETUP_TIMEOUT": "setup_timeout", "NO_VERIFY_SSL": True, @@ -293,6 +297,7 @@ class TestLoadConfig: "DURATION": 9, "WRITE_OBJ_SIZE": 11, "REGISTRY_FILE": "registry_file", + "K6_OUT": "output", "K6_MIN_ITERATION_DURATION": "min_iteration_duration", "K6_SETUP_TIMEOUT": "setup_timeout", "NO_VERIFY_SSL": True, @@ -332,6 +337,7 @@ class TestLoadConfig: expected_env_vars = { "DURATION": 9, "WRITE_OBJ_SIZE": 11, + "K6_OUT": "output", "NO_VERIFY_SSL": True, "REGISTRY_FILE": "registry_file", "K6_MIN_ITERATION_DURATION": "min_iteration_duration", @@ -365,6 +371,7 @@ class TestLoadConfig: "CONFIG_FILE": "config_file", "DURATION": 9, "WRITE_OBJ_SIZE": 11, + "K6_OUT": "output", "REGISTRY_FILE": "registry_file", "K6_MIN_ITERATION_DURATION": "min_iteration_duration", "K6_SETUP_TIMEOUT": "setup_timeout", @@ -419,6 +426,7 @@ class TestLoadConfig: "DURATION": 0, "WRITE_OBJ_SIZE": 0, "REGISTRY_FILE": "", + "K6_OUT": "", "K6_MIN_ITERATION_DURATION": "", "K6_SETUP_TIMEOUT": "", "WRITERS": 0, @@ -449,6 +457,7 @@ class TestLoadConfig: "DURATION": 0, "WRITE_OBJ_SIZE": 0, "REGISTRY_FILE": "", + "K6_OUT": "", "K6_MIN_ITERATION_DURATION": "", "K6_SETUP_TIMEOUT": "", "MAX_WRITERS": 0, @@ -486,6 +495,7 @@ class TestLoadConfig: "DURATION": 0, "WRITE_OBJ_SIZE": 0, "REGISTRY_FILE": "", + "K6_OUT": "", "K6_MIN_ITERATION_DURATION": "", "K6_SETUP_TIMEOUT": "", "WRITERS": 0, @@ -516,6 +526,7 @@ class TestLoadConfig: "DURATION": 0, "WRITE_OBJ_SIZE": 0, "REGISTRY_FILE": "", + "K6_OUT": "", "K6_MIN_ITERATION_DURATION": "", "K6_SETUP_TIMEOUT": "", "NO_VERIFY_SSL": False, @@ -554,6 +565,7 @@ class TestLoadConfig: "WRITE_OBJ_SIZE": 0, "NO_VERIFY_SSL": False, "REGISTRY_FILE": "", + "K6_OUT": "", "K6_MIN_ITERATION_DURATION": "", "K6_SETUP_TIMEOUT": "", "WRITERS": 0, @@ -584,6 +596,7 @@ class TestLoadConfig: "DURATION": 0, "WRITE_OBJ_SIZE": 0, "REGISTRY_FILE": "", + "K6_OUT": "", "K6_MIN_ITERATION_DURATION": "", "K6_SETUP_TIMEOUT": "", "WRITERS": 0, @@ -655,7 +668,7 @@ class TestLoadConfig: 
assert sorted(preset_parameters) == sorted(expected_preset_args) def _check_env_vars(self, load_params: LoadParams, expected_env_vars: dict[str, str]): - env_vars = load_params.get_env_vars() + env_vars = load_params.get_k6_vars() assert env_vars == expected_env_vars def _check_all_values_none(self, dataclass, skip_fields=None): From 22b41b227fbddf0704f5529d24d85552ac08d340 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 27 Feb 2024 11:57:54 +0300 Subject: [PATCH 126/274] [#186] Add total bytes to report Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/interfaces/summarized.py | 2 ++ src/frostfs_testlib/load/load_metrics.py | 4 ++++ src/frostfs_testlib/load/load_report.py | 6 ++++++ 3 files changed, 12 insertions(+) diff --git a/src/frostfs_testlib/load/interfaces/summarized.py b/src/frostfs_testlib/load/interfaces/summarized.py index a005963..54947b4 100644 --- a/src/frostfs_testlib/load/interfaces/summarized.py +++ b/src/frostfs_testlib/load/interfaces/summarized.py @@ -50,6 +50,7 @@ class SummarizedStats: throughput: float = field(default_factory=float) latencies: SummarizedLatencies = field(default_factory=SummarizedLatencies) errors: SummarizedErorrs = field(default_factory=SummarizedErorrs) + total_bytes: int = field(default_factory=int) passed: bool = True def calc_stats(self): @@ -85,6 +86,7 @@ class SummarizedStats: target.latencies.by_node[node_key] = operation.latency target.throughput += operation.throughput target.errors.threshold = load_params.error_threshold + target.total_bytes = operation.total_bytes if operation.failed_iterations: target.errors.by_node[node_key] = operation.failed_iterations diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py index 2dad3f6..035ce8b 100644 --- a/src/frostfs_testlib/load/load_metrics.py +++ b/src/frostfs_testlib/load/load_metrics.py @@ -39,6 +39,10 @@ class OperationMetric(ABC): def throughput(self) -> float: return self._get_metric_rate(self._THROUGHPUT) + @property + def total_bytes(self) -> float: + return self._get_metric(self._THROUGHPUT) + def _get_metric(self, metric: str) -> int: metrics_method_map = { "counter": self._get_counter_metric, diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index 22ddb54..2dfac26 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -120,6 +120,11 @@ class LoadReport: throughput, unit = calc_unit(stats.throughput) throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec") + bytes_html = "" + if stats.total_bytes > 0: + total_bytes, total_bytes_unit = calc_unit(stats.total_bytes) + bytes_html = self._row("Total transferred", f"{total_bytes:.2f} {total_bytes_unit}") + per_node_errors_html = "" for node_key, errors in stats.errors.by_node.items(): if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT: @@ -148,6 +153,7 @@ class LoadReport: Metrics {self._row("Total operations", stats.operations)} {self._row("OP/sec", f"{stats.rate:.2f}")} + {bytes_html} {throughput_html} {latency_html} Errors From 09a7f66d1eda7d8eb251ad306be6181b1fb82a40 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 1 Mar 2024 02:15:40 +0300 Subject: [PATCH 127/274] [#188] Add CredentialsProvider Signed-off-by: Andrey Berezin --- pyproject.toml | 3 ++ .../credentials/authmate_s3.py | 49 +++++++++++++++++++ src/frostfs_testlib/credentials/interfaces.py | 25 ++++++++++ src/frostfs_testlib/hosting/config.py | 1 + 
src/frostfs_testlib/steps/s3/s3_helper.py | 49 +------------------ 5 files changed, 79 insertions(+), 48 deletions(-) create mode 100644 src/frostfs_testlib/credentials/authmate_s3.py create mode 100644 src/frostfs_testlib/credentials/interfaces.py diff --git a/pyproject.toml b/pyproject.toml index 74a163e..c9aaf74 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,6 +57,9 @@ frostfs-http = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate" neo-go = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain" frostfs-ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing" +[project.entry-points."frostfs.testlib.credentials_providers"] +authmate = "frostfs_testlib.credentials.authmate_s3:AuthmateS3CredentialsProvider" + [tool.isort] profile = "black" src_paths = ["src", "tests"] diff --git a/src/frostfs_testlib/credentials/authmate_s3.py b/src/frostfs_testlib/credentials/authmate_s3.py new file mode 100644 index 0000000..c77765c --- /dev/null +++ b/src/frostfs_testlib/credentials/authmate_s3.py @@ -0,0 +1,49 @@ +import re +from datetime import datetime + +from frostfs_testlib import reporter +from frostfs_testlib.cli import FrostfsAuthmate +from frostfs_testlib.credentials.interfaces import S3CredentialsProvider +from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC +from frostfs_testlib.shell import Shell +from frostfs_testlib.steps.cli.container import list_containers +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo + + +class AuthmateS3CredentialsProvider(S3CredentialsProvider): + @reporter.step("Init S3 Credentials using Authmate CLI") + def provide(self, cluster_node: ClusterNode) -> tuple[str, str]: + cluster: Cluster = self.stash["cluster"] + shell: Shell = self.stash["shell"] + wallet: WalletInfo = self.stash["wallet"] + endpoint = cluster_node.storage_node.get_rpc_endpoint() + + gate_public_keys = [s3gate.get_wallet_public_key() for s3gate in cluster.s3_gates] + # unique short bucket name + bucket = f"bucket_{hex(int(datetime.now().timestamp()*1000000))}" + + frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC) + issue_secret_output = frostfs_authmate.secret.issue( + wallet=wallet.path, + peer=endpoint, + gate_public_key=gate_public_keys, + wallet_password=wallet.password, + container_policy=self.stash.get("location_constraints"), + container_friendly_name=bucket, + ).stdout + + aws_access_key_id = str( + re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id") + ) + aws_secret_access_key = str( + re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group( + "aws_secret_access_key" + ) + ) + cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group("container_id")) + + containers_list = list_containers(wallet.path, shell, endpoint) + assert cid in containers_list, f"Expected cid {cid} in {containers_list}"' + + return aws_access_key_id, aws_secret_access_key
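For context, a short sketch of the parsing step above. The exact frostfs-authmate output format is not shown in this patch, so the sample below is only an assumption shaped to satisfy the regular expressions; the named groups are what the .group(...) calls read back:

import re

# Hypothetical `frostfs-authmate secret issue` output (shape assumed from the regexes above).
issue_secret_output = """
{
  "access_key_id": "EXAMPLEACCESSKEYID",
  "secret_access_key": "EXAMPLESECRETACCESSKEY",
  "container_id": "EXAMPLECONTAINERID"
}
"""

# `.*:` walks to the colon on the matching line, `\s.` skips the space and opening quote,
# and the named group captures the bare value.
access_key = re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id")
cid = re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group("container_id")
print(access_key, cid)  # EXAMPLEACCESSKEYID EXAMPLECONTAINERID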
diff --git a/src/frostfs_testlib/credentials/interfaces.py b/src/frostfs_testlib/credentials/interfaces.py new file mode 100644 index 0000000..8db43ad --- /dev/null +++ b/src/frostfs_testlib/credentials/interfaces.py @@ -0,0 +1,25 @@ +from abc import abstractmethod + +from frostfs_testlib.plugins import load_plugin +from frostfs_testlib.storage.cluster import ClusterNode + + +class S3CredentialsProvider(object): + stash: dict + + def __init__(self, stash: dict) -> None: + self.stash = stash + + @abstractmethod + def provide(self, cluster_node: ClusterNode) -> tuple[str, str]: + raise NotImplementedError("Directly called abstract class?") + + +class CredentialsProvider(object): + stash: dict + S3: S3CredentialsProvider + + def __init__(self, s3_plugin_name: str) -> None: + self.stash = {} + s3cls = load_plugin("frostfs.testlib.credentials_providers", s3_plugin_name) + self.S3 = s3cls(self.stash) diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py index 8b256cc..310eab2 100644 --- a/src/frostfs_testlib/hosting/config.py +++ b/src/frostfs_testlib/hosting/config.py @@ -62,6 +62,7 @@ class HostConfig: plugin_name: str healthcheck_plugin_name: str address: str + s3_creds_plugin_name: str = field(default="authmate") services: list[ServiceConfig] = field(default_factory=list) clis: list[CLIConfig] = field(default_factory=list) attributes: dict[str, str] = field(default_factory=dict) diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index dbd3765..f717fd4 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -1,25 +1,15 @@ -import json import logging import os -import re -import uuid from datetime import datetime, timedelta from typing import Optional from dateutil.parser import parse from frostfs_testlib import reporter -from frostfs_testlib.cli import FrostfsAuthmate -from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC -from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus -from frostfs_testlib.shell import CommandOptions, InteractiveInput, Shell -from frostfs_testlib.shell.interfaces import SshCredentials +from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.utils.cli_utils import _run_with_passwd logger = logging.getLogger("NeoLogger") @@ -161,43 +151,6 @@ def assert_s3_acl(acl_grants: list, permitted_users: str): logger.error("FULL_CONTROL is given to All Users") -@reporter.step("Init S3 Credentials") -def init_s3_credentials( - wallet: WalletInfo, - shell: Shell, - cluster: Cluster, - policy: Optional[dict] = None, - s3gates: Optional[list[S3Gate]] = None, - container_placement_policy: Optional[str] = None, -): - gate_public_keys = [] - bucket = str(uuid.uuid4()) - if not s3gates: - s3gates = [cluster.s3_gates[0]] - for s3gate in s3gates: - gate_public_keys.append(s3gate.get_wallet_public_key()) - frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC) - issue_secret_output = frostfs_authmate_exec.secret.issue( - wallet=wallet.path, - peer=cluster.default_rpc_endpoint, - gate_public_key=gate_public_keys, - wallet_password=wallet.password, - container_policy=policy, - container_friendly_name=bucket, - container_placement_policy=container_placement_policy, - ).stdout - aws_access_key_id = str( - re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id") - ) - aws_secret_access_key = str( - re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group( - "aws_secret_access_key" - ) - ) - cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)",
issue_secret_output).group("container_id")) - return cid, aws_access_key_id, aws_secret_access_key - - @reporter.step("Delete bucket with all objects") def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): versioning_status = s3_client.get_bucket_versioning_status(bucket) From 25925c637bad213695bb0a7d8f90e8ba902de517 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 11 Mar 2024 19:23:10 +0300 Subject: [PATCH 128/274] [#191] Credentials work overhaul Signed-off-by: Andrey Berezin --- pyproject.toml | 10 +- .../cli/frostfs_cli/container.py | 16 +-- src/frostfs_testlib/cli/frostfs_cli/netmap.py | 8 +- src/frostfs_testlib/cli/frostfs_cli/object.py | 16 +-- .../cli/frostfs_cli/session.py | 10 +- src/frostfs_testlib/cli/frostfs_cli/util.py | 14 +-- ...authmate_s3.py => authmate_s3_provider.py} | 32 +++-- src/frostfs_testlib/credentials/interfaces.py | 50 ++++++-- .../credentials/wallet_factory_provider.py | 14 +++ src/frostfs_testlib/hosting/config.py | 2 + src/frostfs_testlib/load/k6.py | 34 ++---- src/frostfs_testlib/load/runners.py | 114 ++++-------------- .../s3/curl_bucket_resolver.py | 16 +++ src/frostfs_testlib/s3/interfaces.py | 27 +++-- src/frostfs_testlib/steps/acl.py | 42 +++---- src/frostfs_testlib/steps/cli/container.py | 92 ++++++-------- src/frostfs_testlib/steps/cli/object.py | 99 +++++---------- .../steps/complex_object_actions.py | 14 +-- src/frostfs_testlib/steps/epoch.py | 8 +- src/frostfs_testlib/steps/s3/s3_helper.py | 11 +- src/frostfs_testlib/steps/session_token.py | 17 ++- src/frostfs_testlib/steps/storage_object.py | 6 +- src/frostfs_testlib/steps/storage_policy.py | 16 ++- src/frostfs_testlib/steps/tombstone.py | 16 +-- .../controllers/background_load_controller.py | 10 +- .../controllers/cluster_state_controller.py | 37 +++--- .../storage/dataclasses/acl.py | 20 ++- .../dataclasses/storage_object_info.py | 3 +- .../storage/dataclasses/wallet.py | 35 +++--- src/frostfs_testlib/utils/version_utils.py | 26 ++-- src/frostfs_testlib/utils/wallet_utils.py | 40 +++--- 31 files changed, 370 insertions(+), 485 deletions(-) rename src/frostfs_testlib/credentials/{authmate_s3.py => authmate_s3_provider.py} (58%) create mode 100644 src/frostfs_testlib/credentials/wallet_factory_provider.py create mode 100644 src/frostfs_testlib/s3/curl_bucket_resolver.py diff --git a/pyproject.toml b/pyproject.toml index c9aaf74..5a38dba 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,15 +58,19 @@ neo-go = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain" frostfs-ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing" [project.entry-points."frostfs.testlib.credentials_providers"] -authmate = "frostfs_testlib.credentials.authmate_s3:AuthmateS3CredentialsProvider" +authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3CredentialsProvider" +wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider" + +[project.entry-points."frostfs.testlib.bucket_cid_resolver"] +frostfs = "frostfs_testlib.s3.curl_bucket_resolver:CurlBucketContainerResolver" [tool.isort] profile = "black" src_paths = ["src", "tests"] -line_length = 120 +line_length = 140 [tool.black] -line-length = 120 +line-length = 140 target-version = ["py310"] [tool.bumpver] diff --git a/src/frostfs_testlib/cli/frostfs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py index 374c880..b5592e8 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/container.py +++ b/src/frostfs_testlib/cli/frostfs_cli/container.py @@ -8,7 
+8,7 @@ class FrostfsCliContainer(CliCommand): def create( self, rpc_endpoint: str, - wallet: str, + wallet: Optional[str] = None, address: Optional[str] = None, attributes: Optional[dict] = None, basic_acl: Optional[str] = None, @@ -57,8 +57,8 @@ class FrostfsCliContainer(CliCommand): def delete( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, await_mode: bool = False, session: Optional[str] = None, @@ -93,8 +93,8 @@ class FrostfsCliContainer(CliCommand): def get( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, await_mode: bool = False, to: Optional[str] = None, @@ -129,8 +129,8 @@ class FrostfsCliContainer(CliCommand): def get_eacl( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, await_mode: bool = False, to: Optional[str] = None, @@ -166,7 +166,7 @@ class FrostfsCliContainer(CliCommand): def list( self, rpc_endpoint: str, - wallet: str, + wallet: Optional[str] = None, address: Optional[str] = None, owner: Optional[str] = None, ttl: Optional[int] = None, @@ -197,8 +197,8 @@ class FrostfsCliContainer(CliCommand): def list_objects( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -227,8 +227,8 @@ class FrostfsCliContainer(CliCommand): def set_eacl( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, await_mode: bool = False, table: Optional[str] = None, @@ -264,8 +264,8 @@ class FrostfsCliContainer(CliCommand): def search_node( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, ttl: Optional[int] = None, from_file: Optional[str] = None, diff --git a/src/frostfs_testlib/cli/frostfs_cli/netmap.py b/src/frostfs_testlib/cli/frostfs_cli/netmap.py index 8920893..d219940 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/netmap.py +++ b/src/frostfs_testlib/cli/frostfs_cli/netmap.py @@ -8,7 +8,7 @@ class FrostfsCliNetmap(CliCommand): def epoch( self, rpc_endpoint: str, - wallet: str, + wallet: Optional[str] = None, address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, @@ -38,7 +38,7 @@ class FrostfsCliNetmap(CliCommand): def netinfo( self, rpc_endpoint: str, - wallet: str, + wallet: Optional[str] = None, address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, @@ -68,7 +68,7 @@ class FrostfsCliNetmap(CliCommand): def nodeinfo( self, rpc_endpoint: str, - wallet: str, + wallet: Optional[str] = None, address: Optional[str] = None, generate_key: bool = False, json: bool = False, @@ -100,7 +100,7 @@ class FrostfsCliNetmap(CliCommand): def snapshot( self, rpc_endpoint: str, - wallet: str, + wallet: Optional[str] = None, address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 38a69e4..5d5bd91 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -8,9 +8,9 @@ class FrostfsCliObject(CliCommand): def delete( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, session: Optional[str] = None, @@ -44,9 +44,9 @@ class 
FrostfsCliObject(CliCommand): def get( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, file: Optional[str] = None, @@ -88,9 +88,9 @@ class FrostfsCliObject(CliCommand): def hash( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, range: Optional[str] = None, @@ -130,9 +130,9 @@ class FrostfsCliObject(CliCommand): def head( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, file: Optional[str] = None, @@ -176,9 +176,9 @@ class FrostfsCliObject(CliCommand): def lock( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, + wallet: Optional[str] = None, lifetime: Optional[int] = None, expire_at: Optional[int] = None, address: Optional[str] = None, @@ -216,9 +216,9 @@ class FrostfsCliObject(CliCommand): def put( self, rpc_endpoint: str, - wallet: str, cid: str, file: str, + wallet: Optional[str] = None, address: Optional[str] = None, attributes: Optional[dict] = None, bearer: Optional[str] = None, @@ -267,10 +267,10 @@ class FrostfsCliObject(CliCommand): def range( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, range: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, file: Optional[str] = None, @@ -311,8 +311,8 @@ class FrostfsCliObject(CliCommand): def search( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, filters: Optional[list] = None, diff --git a/src/frostfs_testlib/cli/frostfs_cli/session.py b/src/frostfs_testlib/cli/frostfs_cli/session.py index e21cc23..857b13e 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/session.py +++ b/src/frostfs_testlib/cli/frostfs_cli/session.py @@ -9,7 +9,6 @@ class FrostfsCliSession(CliCommand): self, rpc_endpoint: str, wallet: str, - wallet_password: str, out: str, lifetime: Optional[int] = None, address: Optional[str] = None, @@ -30,12 +29,7 @@ class FrostfsCliSession(CliCommand): Returns: Command's result. """ - return self._execute_with_password( + return self._execute( "session create", - wallet_password, - **{ - param: value - for param, value in locals().items() - if param not in ["self", "wallet_password"] - }, + **{param: value for param, value in locals().items() if param not in ["self"]}, ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/util.py b/src/frostfs_testlib/cli/frostfs_cli/util.py index 99acd0a..7914169 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/util.py +++ b/src/frostfs_testlib/cli/frostfs_cli/util.py @@ -6,12 +6,12 @@ from frostfs_testlib.shell import CommandResult class FrostfsCliUtil(CliCommand): def sign_bearer_token( - self, - wallet: str, - from_file: str, - to_file: str, - address: Optional[str] = None, - json: Optional[bool] = False, + self, + from_file: str, + to_file: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + json: Optional[bool] = False, ) -> CommandResult: """ Sign bearer token to use it in requests. 
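Throughout these wrappers the whole signature is forwarded as **{param: value for param, value in locals().items() if param not in ["self"]}. As a rough mental model only (the real CliCommand._execute is not shown in this patch series and may differ), the forwarding can be pictured with the sketch below; the values are hypothetical:

def _execute(command: str, **params) -> str:
    # Sketch: turn surviving keyword arguments into CLI flags.
    flags = []
    for name, value in params.items():
        if value is None or value is False:
            continue  # unset optionals and disabled booleans emit no flag
        flag = f"--{name.replace('_', '-')}"
        flags.append(flag if value is True else f"{flag} {value}")
    return f"frostfs-cli {command} " + " ".join(flags)

print(_execute("control drop-objects", endpoint="localhost:8090", objects="<cid>/<oid>", wallet=None, timeout=None))
# frostfs-cli control drop-objects --endpoint localhost:8090 --objects <cid>/<oid>

This is why making wallet and wallet_password Optional with a None default is enough to drop the flags entirely: a None value simply never reaches the command line.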
@@ -33,9 +33,9 @@ class FrostfsCliUtil(CliCommand): def sign_session_token( self, - wallet: str, from_file: str, to_file: str, + wallet: Optional[str] = None, address: Optional[str] = None, ) -> CommandResult: """ diff --git a/src/frostfs_testlib/credentials/authmate_s3.py b/src/frostfs_testlib/credentials/authmate_s3_provider.py similarity index 58% rename from src/frostfs_testlib/credentials/authmate_s3.py rename to src/frostfs_testlib/credentials/authmate_s3_provider.py index c77765c..6343b5a 100644 --- a/src/frostfs_testlib/credentials/authmate_s3.py +++ b/src/frostfs_testlib/credentials/authmate_s3_provider.py @@ -1,25 +1,26 @@ import re from datetime import datetime +from typing import Optional from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAuthmate -from frostfs_testlib.credentials.interfaces import S3CredentialsProvider +from frostfs_testlib.credentials.interfaces import S3Credentials, S3CredentialsProvider, User from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC -from frostfs_testlib.shell import Shell +from frostfs_testlib.shell import LocalShell from frostfs_testlib.steps.cli.container import list_containers -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate class AuthmateS3CredentialsProvider(S3CredentialsProvider): @reporter.step("Init S3 Credentials using Authmate CLI") - def provide(self, cluster_node: ClusterNode) -> tuple[str, str]: - cluster: Cluster = self.stash["cluster"] - shell: Shell = self.stash["shell"] - wallet: WalletInfo = self.stash["wallet"] + def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials: + cluster_nodes: list[ClusterNode] = self.cluster.cluster_nodes + shell = LocalShell() + wallet = user.wallet endpoint = cluster_node.storage_node.get_rpc_endpoint() - gate_public_keys = [s3gate.get_wallet_public_key() for s3gate in cluster.s3_gates] + gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes] # unique short bucket name bucket = f"bucket_{hex(int(datetime.now().timestamp()*1000000))}" @@ -29,21 +30,18 @@ class AuthmateS3CredentialsProvider(S3CredentialsProvider): peer=endpoint, gate_public_key=gate_public_keys, wallet_password=wallet.password, - container_policy=self.stash.get("location_constraints"), + container_policy=location_constraints, container_friendly_name=bucket, ).stdout - aws_access_key_id = str( - re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id") - ) + aws_access_key_id = str(re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id")) aws_secret_access_key = str( - re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group( - "aws_secret_access_key" - ) + re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group("aws_secret_access_key") ) cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group("container_id")) containers_list = list_containers(wallet.path, shell, endpoint) assert cid in containers_list, f"Expected cid {cid} in {containers_list}" - return aws_access_key_id, aws_secret_access_key + user.s3_credentials = S3Credentials(aws_access_key_id, aws_secret_access_key) + return user.s3_credentials diff --git a/src/frostfs_testlib/credentials/interfaces.py
b/src/frostfs_testlib/credentials/interfaces.py index 8db43ad..c863da0 100644 --- a/src/frostfs_testlib/credentials/interfaces.py +++ b/src/frostfs_testlib/credentials/interfaces.py @@ -1,25 +1,51 @@ -from abc import abstractmethod +from abc import ABC, abstractmethod +from dataclasses import dataclass, field +from typing import Any, Optional from frostfs_testlib.plugins import load_plugin -from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -class S3CredentialsProvider(object): - stash: dict +@dataclass +class S3Credentials: + access_key: str + secret_key: str - def __init__(self, stash: dict) -> None: - self.stash = stash + +@dataclass +class User: + name: str + attributes: dict[str, Any] = field(default_factory=dict) + wallet: WalletInfo | None = None + s3_credentials: S3Credentials | None = None + + +class S3CredentialsProvider(ABC): + def __init__(self, cluster: Cluster) -> None: + self.cluster = cluster @abstractmethod - def provide(self, cluster_node: ClusterNode) -> tuple[str, str]: + def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials: + raise NotImplementedError("Directly called abstract class?") + + +class GrpcCredentialsProvider(ABC): + def __init__(self, cluster: Cluster) -> None: + self.cluster = cluster + + @abstractmethod + def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo: raise NotImplementedError("Directly called abstract class?") class CredentialsProvider(object): - stash: dict S3: S3CredentialsProvider + GRPC: GrpcCredentialsProvider - def __init__(self, s3_plugin_name: str) -> None: - self.stash = {} - s3cls = load_plugin("frostfs.testlib.credentials_providers", s3_plugin_name) - self.S3 = s3cls(self.stash) + def __init__(self, cluster: Cluster) -> None: + config = cluster.cluster_nodes[0].host.config + s3_cls = load_plugin("frostfs.testlib.credentials_providers", config.s3_creds_plugin_name) + self.S3 = s3_cls(cluster) + grpc_cls = load_plugin("frostfs.testlib.credentials_providers", config.grpc_creds_plugin_name) + self.GRPC = grpc_cls(cluster) diff --git a/src/frostfs_testlib/credentials/wallet_factory_provider.py b/src/frostfs_testlib/credentials/wallet_factory_provider.py new file mode 100644 index 0000000..4d1ab7a --- /dev/null +++ b/src/frostfs_testlib/credentials/wallet_factory_provider.py @@ -0,0 +1,14 @@ +from frostfs_testlib import reporter +from frostfs_testlib.credentials.interfaces import GrpcCredentialsProvider, User +from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS +from frostfs_testlib.shell.local_shell import LocalShell +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo + + +class WalletFactoryProvider(GrpcCredentialsProvider): + @reporter.step("Init gRPC Credentials using wallet generation") + def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo: + wallet_factory = WalletFactory(ASSETS_DIR, LocalShell()) + user.wallet = wallet_factory.create_wallet(file_name=user, password=DEFAULT_WALLET_PASS) + return user.wallet diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py index 310eab2..f52f8b7 100644 --- a/src/frostfs_testlib/hosting/config.py +++ b/src/frostfs_testlib/hosting/config.py @@ -63,6 +63,8 @@ class HostConfig: healthcheck_plugin_name: str address: str 
s3_creds_plugin_name: str = field(default="authmate") + grpc_creds_plugin_name: str = field(default="wallet_factory") + product: str = field(default="frostfs") services: list[ServiceConfig] = field(default_factory=list) clis: list[CLIConfig] = field(default_factory=list) attributes: dict[str, str] = field(default_factory=dict) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index 1e98b98..caf3cfe 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -9,13 +9,13 @@ from typing import Any from urllib.parse import urlparse from frostfs_testlib import reporter +from frostfs_testlib.credentials.interfaces import User from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType from frostfs_testlib.processes.remote_process import RemoteProcess from frostfs_testlib.resources.common import STORAGE_USER_NAME from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.test_control import wait_for_success EXIT_RESULT_CODE = 0 @@ -42,16 +42,16 @@ class K6: k6_dir: str, shell: Shell, loader: Loader, - wallet: WalletInfo, + user: User, ): if load_params.scenario is None: raise RuntimeError("Scenario should not be none") - self.load_params: LoadParams = load_params + self.load_params = load_params self.endpoints = endpoints - self.loader: Loader = loader - self.shell: Shell = shell - self.wallet = wallet + self.loader = loader + self.shell = shell + self.user = user self.preset_output: str = "" self.summary_json: str = os.path.join( self.load_params.working_dir, @@ -64,13 +64,9 @@ class K6: f"{self._generate_env_variables()}{self._k6_dir}/k6 run {self._generate_k6_variables()} " f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js" ) - user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None - process_id = ( - self.load_params.load_id - if self.load_params.scenario != LoadScenario.VERIFY - else f"{self.load_params.load_id}_verify" - ) - self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, user, process_id) + remote_user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None + process_id = self.load_params.load_id if self.load_params.scenario != LoadScenario.VERIFY else f"{self.load_params.load_id}_verify" + self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, remote_user, process_id) def _get_fill_percents(self): fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs").stdout.split("\n") @@ -103,8 +99,8 @@ class K6: preset_grpc: [ preset_grpc, f"--endpoint {','.join(self.endpoints)}", - f"--wallet {self.wallet.path} ", - f"--config {self.wallet.config_path} ", + f"--wallet {self.user.wallet.path} ", + f"--config {self.user.wallet.config_path} ", ], preset_s3: [ preset_s3, @@ -167,9 +163,7 @@ class K6: remaining_time = timeout - working_time setup_teardown_time = ( - int(K6_TEARDOWN_PERIOD) - + self.load_params.get_init_time() - + int(self.load_params.setup_timeout.replace("s", "").strip()) + int(K6_TEARDOWN_PERIOD) + self.load_params.get_init_time() + int(self.load_params.setup_timeout.replace("s", "").strip()) ) remaining_time_including_setup_and_teardown = remaining_time + setup_teardown_time timeout = 
remaining_time_including_setup_and_teardown @@ -201,9 +195,7 @@ class K6: if not self.load_params.fill_percent is None: with reporter.step(f"Check the percentage of filling of all data disks on the node"): if self.check_fill_percent(): - logger.info( - f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%" - ) + logger.info(f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%") event.set() self.stop() return diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index d456270..a34786f 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -1,24 +1,20 @@ import copy import itertools import math -import re import time from dataclasses import fields from threading import Event from typing import Optional from urllib.parse import urlparse -import yaml - from frostfs_testlib import reporter -from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate +from frostfs_testlib.credentials.interfaces import S3Credentials, User from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner from frostfs_testlib.load.k6 import K6 from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader from frostfs_testlib.resources import optionals -from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC from frostfs_testlib.resources.common import STORAGE_USER_NAME from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR, LOAD_NODE_SSH_USER, LOAD_NODES from frostfs_testlib.shell.command_inspectors import SuInspector @@ -26,7 +22,6 @@ from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import parallel, run_optionally from frostfs_testlib.testing.test_control import retry from frostfs_testlib.utils import datetime_utils @@ -57,17 +52,17 @@ class RunnerBase(ScenarioRunner): class DefaultRunner(RunnerBase): loaders: list[Loader] - loaders_wallet: WalletInfo + user: User def __init__( self, - loaders_wallet: WalletInfo, + user: User, load_ip_list: Optional[list[str]] = None, ) -> None: if load_ip_list is None: load_ip_list = LOAD_NODES self.loaders = RemoteLoader.from_ip_list(load_ip_list) - self.loaders_wallet = loaders_wallet + self.user = user @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step("Preparation steps") @@ -86,55 +81,27 @@ class DefaultRunner(RunnerBase): return with reporter.step("Init s3 client on loaders"): - storage_node = nodes_under_load[0].service(StorageNode) - s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes] - grpc_peer = storage_node.get_rpc_endpoint() - - parallel(self._prepare_loader, self.loaders, load_params, grpc_peer, s3_public_keys, k6_dir) + s3_credentials = self.user.s3_credentials + parallel(self._aws_configure_on_loader, self.loaders, s3_credentials) def _force_fresh_registry(self, loader: Loader, load_params: LoadParams): with reporter.step(f"Forcing fresh registry on {loader.ip}"): shell = loader.get_shell() 
shell.exec(f"rm -f {load_params.registry_file}") - def _prepare_loader( + def _aws_configure_on_loader( self, loader: Loader, - load_params: LoadParams, - grpc_peer: str, - s3_public_keys: list[str], - k6_dir: str, + s3_credentials: S3Credentials, ): - with reporter.step(f"Init s3 client on {loader.ip}"): - shell = loader.get_shell() - frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC) - issue_secret_output = frostfs_authmate_exec.secret.issue( - wallet=self.loaders_wallet.path, - peer=grpc_peer, - gate_public_key=s3_public_keys, - container_placement_policy=load_params.preset.container_placement_policy, - container_policy=f"{k6_dir}/scenarios/files/policy.json", - wallet_password=self.loaders_wallet.password, - ).stdout - aws_access_key_id = str( - re.search(r"access_key_id.*:\s.(?P\w*)", issue_secret_output).group( - "aws_access_key_id" - ) - ) - aws_secret_access_key = str( - re.search( - r"secret_access_key.*:\s.(?P\w*)", - issue_secret_output, - ).group("aws_secret_access_key") - ) - + with reporter.step(f"Aws configure on {loader.ip}"): configure_input = [ - InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id), - InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key), + InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=s3_credentials.access_key), + InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=s3_credentials.secret_key), InteractiveInput(prompt_pattern=r".*", input=""), InteractiveInput(prompt_pattern=r".*", input=""), ] - shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input)) + loader.get_shell().exec("aws configure", CommandOptions(interactive_inputs=configure_input)) @reporter.step("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): @@ -176,12 +143,10 @@ class DefaultRunner(RunnerBase): k6_dir, shell, loader, - self.loaders_wallet, + self.user, ) - def _get_distributed_load_params_list( - self, original_load_params: LoadParams, workers_count: int - ) -> list[LoadParams]: + def _get_distributed_load_params_list(self, original_load_params: LoadParams, workers_count: int) -> list[LoadParams]: divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR) distributed_load_params: list[LoadParams] = [] @@ -266,18 +231,20 @@ class LocalRunner(RunnerBase): loaders: list[Loader] cluster_state_controller: ClusterStateController file_keeper: FileKeeper - wallet: WalletInfo + user: User def __init__( self, cluster_state_controller: ClusterStateController, file_keeper: FileKeeper, nodes_under_load: list[ClusterNode], + user: User, ) -> None: self.cluster_state_controller = cluster_state_controller self.file_keeper = file_keeper self.loaders = [NodeLoader(node) for node in nodes_under_load] self.nodes_under_load = nodes_under_load + self.user = user @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step("Preparation steps") @@ -326,11 +293,9 @@ class LocalRunner(RunnerBase): shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz --strip-components 2 -C {k6_dir}") shell.exec(f"sudo chmod -R 777 {k6_dir}") - with reporter.step("Create empty_passwd"): - self.wallet = WalletInfo(f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml") - content = yaml.dump({"password": ""}) - shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}') - shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}") + with reporter.step("chmod 777 wallet related files on loader"): + 
shell.exec(f"sudo chmod -R 777 {self.user.wallet.config_path}") + shell.exec(f"sudo chmod -R 777 {self.user.wallet.path}") @reporter.step("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): @@ -363,7 +328,7 @@ class LocalRunner(RunnerBase): k6_dir, shell, loader, - self.wallet, + self.user, ) def start(self): @@ -453,7 +418,7 @@ class S3LocalRunner(LocalRunner): k6_dir, shell, loader, - self.wallet, + self.user, ) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @@ -466,17 +431,10 @@ class S3LocalRunner(LocalRunner): k6_dir: str, ): self.k6_dir = k6_dir - with reporter.step("Init s3 client on loaders"): - storage_node = nodes_under_load[0].service(StorageNode) - s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes] - grpc_peer = storage_node.get_rpc_endpoint() - - parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, s3_public_keys, grpc_peer) + parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, cluster_nodes) @reporter.step("Prepare node {cluster_node}") - def prepare_node( - self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, s3_public_keys: list[str], grpc_peer: str - ): + def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, cluster_nodes: list[ClusterNode]): LocalRunner.prepare_node(self, cluster_node, k6_dir, load_params) self.endpoints = cluster_node.s3_gate.get_all_endpoints() shell = cluster_node.host.get_shell() @@ -497,29 +455,9 @@ class S3LocalRunner(LocalRunner): shell.exec(f"sudo python3 -m pip install -I {k6_dir}/requests.tar.gz") with reporter.step(f"Init s3 client on {cluster_node.host_ip}"): - frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC) - issue_secret_output = frostfs_authmate_exec.secret.issue( - wallet=self.wallet.path, - peer=grpc_peer, - gate_public_key=s3_public_keys, - container_placement_policy=load_params.preset.container_placement_policy, - container_policy=f"{k6_dir}/scenarios/files/policy.json", - wallet_password=self.wallet.password, - ).stdout - aws_access_key_id = str( - re.search(r"access_key_id.*:\s.(?P\w*)", issue_secret_output).group( - "aws_access_key_id" - ) - ) - aws_secret_access_key = str( - re.search( - r"secret_access_key.*:\s.(?P\w*)", - issue_secret_output, - ).group("aws_secret_access_key") - ) configure_input = [ - InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id), - InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key), + InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=self.user.s3_credentials.access_key), + InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=self.user.s3_credentials.secret_key), InteractiveInput(prompt_pattern=r".*", input=""), InteractiveInput(prompt_pattern=r".*", input=""), ] diff --git a/src/frostfs_testlib/s3/curl_bucket_resolver.py b/src/frostfs_testlib/s3/curl_bucket_resolver.py new file mode 100644 index 0000000..b713e79 --- /dev/null +++ b/src/frostfs_testlib/s3/curl_bucket_resolver.py @@ -0,0 +1,16 @@ +import re + +from frostfs_testlib.cli.generic_cli import GenericCli +from frostfs_testlib.s3.interfaces import BucketContainerResolver +from frostfs_testlib.storage.cluster import ClusterNode + + +class CurlBucketContainerResolver(BucketContainerResolver): + def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str: + curl = GenericCli("curl", node.host) + output = curl(f"-I 
http://127.0.0.1:8084/{bucket_name}") + pattern = r"X-Container-Id: (\S+)" + cid = re.findall(pattern, output.stdout) + if cid: + return cid[0] + return None diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index dd21823..b6a10e3 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -1,7 +1,8 @@ -from abc import abstractmethod +from abc import ABC, abstractmethod from datetime import datetime from typing import Literal, Optional, Union +from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum @@ -31,6 +32,22 @@ ACL_COPY = [ ] +class BucketContainerResolver(ABC): + @abstractmethod + def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str: + """ + Resolve Container ID from bucket name + + Args: + node: node from where we want to resolve + bucket_name: name of the bucket + **kwargs: any other required params + + Returns: Container ID + """ + raise NotImplementedError("Call from abstract class") + + class S3ClientWrapper(HumanReadableABC): @abstractmethod def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str) -> None: @@ -296,15 +313,11 @@ class S3ClientWrapper(HumanReadableABC): abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.""" @abstractmethod - def upload_part( - self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str - ) -> str: + def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: """Uploads a part in a multipart upload.""" @abstractmethod - def upload_part_copy( - self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str - ) -> str: + def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: """Uploads a part by copying data from an existing object as data source.""" @abstractmethod diff --git a/src/frostfs_testlib/steps/acl.py b/src/frostfs_testlib/steps/acl.py index e97e4ee..da407b6 100644 --- a/src/frostfs_testlib/steps/acl.py +++ b/src/frostfs_testlib/steps/acl.py @@ -11,25 +11,20 @@ import base58 from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC -from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG +from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.dataclasses.acl import ( - EACL_LIFETIME, - FROSTFS_CONTRACT_CACHE_TIMEOUT, - EACLPubKey, - EACLRole, - EACLRule, -) +from frostfs_testlib.storage.dataclasses.acl import EACL_LIFETIME, FROSTFS_CONTRACT_CACHE_TIMEOUT, EACLPubKey, EACLRole, EACLRule +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.utils import wallet_utils logger = logging.getLogger("NeoLogger") @reporter.step("Get extended ACL") -def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]: - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) +def get_eacl(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str) -> Optional[str]: + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) try: - result = cli.container.get_eacl(wallet=wallet_path, rpc_endpoint=endpoint, cid=cid) + result = cli.container.get_eacl(rpc_endpoint=endpoint, cid=cid) except RuntimeError as exc: 
logger.info("Extended ACL table is not set for this container") logger.info(f"Got exception while getting eacl: {exc}") @@ -41,16 +36,15 @@ def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optiona @reporter.step("Set extended ACL") def set_eacl( - wallet_path: str, + wallet: WalletInfo, cid: str, eacl_table_path: str, shell: Shell, endpoint: str, session_token: Optional[str] = None, ) -> None: - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli.container.set_eacl( - wallet=wallet_path, rpc_endpoint=endpoint, cid=cid, table=eacl_table_path, @@ -66,7 +60,7 @@ def _encode_cid_for_eacl(cid: str) -> str: def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str: table_file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"eacl_table_{str(uuid.uuid4())}.json") - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC) cli.acl.extended_create(cid=cid, out=table_file_path, rule=rules_list) with open(table_file_path, "r") as file: @@ -77,7 +71,7 @@ def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str: def form_bearertoken_file( - wif: str, + wallet: WalletInfo, cid: str, eacl_rule_list: List[Union[EACLRule, EACLPubKey]], shell: Shell, @@ -92,7 +86,7 @@ def form_bearertoken_file( enc_cid = _encode_cid_for_eacl(cid) if cid else None file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - eacl = get_eacl(wif, cid, shell, endpoint) + eacl = get_eacl(wallet, cid, shell, endpoint) json_eacl = dict() if eacl: eacl = eacl.replace("eACL: ", "").split("Signature")[0] @@ -133,7 +127,7 @@ def form_bearertoken_file( if sign: sign_bearer( shell=shell, - wallet_path=wif, + wallet=wallet, eacl_rules_file_from=file_path, eacl_rules_file_to=file_path, json=True, @@ -164,11 +158,9 @@ def eacl_rules(access: str, verbs: list, user: str) -> list[str]: return rules -def sign_bearer(shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None: - frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG) - frostfscli.util.sign_bearer_token( - wallet=wallet_path, from_file=eacl_rules_file_from, to_file=eacl_rules_file_to, json=json - ) +def sign_bearer(shell: Shell, wallet: WalletInfo, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None: + frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + frostfscli.util.sign_bearer_token(eacl_rules_file_from, eacl_rules_file_to, json=json) @reporter.step("Wait for eACL cache expired") @@ -178,9 +170,7 @@ def wait_for_cache_expired(): @reporter.step("Return bearer token in base64 to caller") -def bearer_token_base64_from_file( - bearer_path: str, -) -> str: +def bearer_token_base64_from_file(bearer_path: str) -> str: with open(bearer_path, "rb") as file: signed = file.read() return base64.b64encode(signed).decode("utf-8") diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index 82ff407..fc643e2 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -5,12 +5,11 @@ from dataclasses import dataclass from time import sleep from typing import Optional, Union -import requests - from frostfs_testlib import reporter -from frostfs_testlib.cli import FrostfsCli, GenericCli +from frostfs_testlib.cli import FrostfsCli +from frostfs_testlib.plugins import 
load_plugin
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
-from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
+from frostfs_testlib.s3.interfaces import BucketContainerResolver
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
@@ -25,7 +24,7 @@ logger = logging.getLogger("NeoLogger")
 @dataclass
 class StorageContainerInfo:
     id: str
-    wallet_file: WalletInfo
+    wallet: WalletInfo


 class StorageContainer:
@@ -42,11 +41,8 @@ class StorageContainer:
     def get_id(self) -> str:
         return self.storage_container_info.id

-    def get_wallet_path(self) -> str:
-        return self.storage_container_info.wallet_file.path
-
-    def get_wallet_config_path(self) -> str:
-        return self.storage_container_info.wallet_file.config_path
+    def get_wallet(self) -> WalletInfo:
+        return self.storage_container_info.wallet

     @reporter.step("Generate new object and put in container")
     def generate_object(
@@ -61,37 +57,34 @@ class StorageContainer:
         file_hash = get_file_hash(file_path)

         container_id = self.get_id()
-        wallet_path = self.get_wallet_path()
-        wallet_config = self.get_wallet_config_path()
+        wallet = self.get_wallet()
         with reporter.step(f"Put object with size {size} to container {container_id}"):
             if endpoint:
                 object_id = put_object(
-                    wallet=wallet_path,
+                    wallet=wallet,
                     path=file_path,
                     cid=container_id,
                     expire_at=expire_at,
                     shell=self.shell,
                     endpoint=endpoint,
                     bearer=bearer_token,
-                    wallet_config=wallet_config,
                 )
             else:
                 object_id = put_object_to_random_node(
-                    wallet=wallet_path,
+                    wallet=wallet,
                     path=file_path,
                     cid=container_id,
                     expire_at=expire_at,
                     shell=self.shell,
                     cluster=self.cluster,
                     bearer=bearer_token,
-                    wallet_config=wallet_config,
                 )

         storage_object = StorageObjectInfo(
             container_id,
             object_id,
             size=size,
-            wallet_file_path=wallet_path,
+            wallet=wallet,
             file_path=file_path,
             file_hash=file_hash,
         )
@@ -106,14 +99,13 @@ REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X"
 @reporter.step("Create Container")
 def create_container(
-    wallet: str,
+    wallet: WalletInfo,
     shell: Shell,
     endpoint: str,
     rule: str = DEFAULT_PLACEMENT_RULE,
     basic_acl: str = "",
     attributes: Optional[dict] = None,
     session_token: str = "",
-    session_wallet: str = "",
     name: Optional[str] = None,
     options: Optional[dict] = None,
     await_mode: bool = True,
@@ -124,7 +116,7 @@ def create_container(
     A wrapper for `frostfs-cli container create` call.
Args: - wallet (str): a wallet on whose behalf a container is created + wallet (WalletInfo): a wallet on whose behalf a container is created rule (optional, str): placement rule for container basic_acl (optional, str): an ACL for container, will be appended to `--basic-acl` key @@ -146,10 +138,9 @@ def create_container( (str): CID of the created container """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) result = cli.container.create( rpc_endpoint=endpoint, - wallet=session_wallet if session_wallet else wallet, policy=rule, basic_acl=basic_acl, attributes=attributes, @@ -170,9 +161,7 @@ def create_container( return cid -def wait_for_container_creation( - wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1 -): +def wait_for_container_creation(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1): for _ in range(attempts): containers = list_containers(wallet, shell, endpoint) if cid in containers: @@ -182,9 +171,7 @@ def wait_for_container_creation( raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting") -def wait_for_container_deletion( - wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1 -): +def wait_for_container_deletion(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1): for _ in range(attempts): try: get_container(wallet, cid, shell=shell, endpoint=endpoint) @@ -198,29 +185,27 @@ def wait_for_container_deletion( @reporter.step("List Containers") -def list_containers( - wallet: str, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT -) -> list[str]: +def list_containers(wallet: WalletInfo, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT) -> list[str]: """ A wrapper for `frostfs-cli container list` call. It returns all the available containers for the given wallet. Args: - wallet (str): a wallet on whose behalf we list the containers + wallet (WalletInfo): a wallet on whose behalf we list the containers shell: executor for cli command endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key timeout: Timeout for the operation. Returns: (list): list of containers """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) - result = cli.container.list(rpc_endpoint=endpoint, wallet=wallet, timeout=timeout) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + result = cli.container.list(rpc_endpoint=endpoint, timeout=timeout) logger.info(f"Containers: \n{result}") return result.stdout.split() @reporter.step("List Objects in container") def list_objects( - wallet: str, + wallet: WalletInfo, shell: Shell, container_id: str, endpoint: str, @@ -230,7 +215,7 @@ def list_objects( A wrapper for `frostfs-cli container list-objects` call. It returns all the available objects in container. 
Args:
-        wallet (str): a wallet on whose behalf we list the containers objects
+        wallet (WalletInfo): a wallet on whose behalf we list the containers objects
         shell: executor for cli command
         container_id: cid of container
         endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
@@ -238,15 +223,15 @@
     Returns:
         (list): list of containers
     """
-    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
-    result = cli.container.list_objects(rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout)
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
+    result = cli.container.list_objects(rpc_endpoint=endpoint, cid=container_id, timeout=timeout)
     logger.info(f"Container objects: \n{result}")
     return result.stdout.split()


 @reporter.step("Get Container")
 def get_container(
-    wallet: str,
+    wallet: WalletInfo,
     cid: str,
     shell: Shell,
     endpoint: str,
@@ -257,7 +242,7 @@
     A wrapper for `frostfs-cli container get` call.
     It extracts container's attributes and rearranges them into a more compact view.

     Args:
-        wallet (str): path to a wallet on whose behalf we get the container
+        wallet (WalletInfo): wallet on whose behalf we get the container
         cid (str): ID of the container to get
         shell: executor for cli command
         endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
@@ -267,8 +252,8 @@
         (dict, str): dict of container attributes
     """
-    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
-    result = cli.container.get(rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout)
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
+    result = cli.container.get(rpc_endpoint=endpoint, cid=cid, json_mode=json_mode, timeout=timeout)

     if not json_mode:
         return result.stdout
@@ -285,7 +270,7 @@
 @reporter.step("Delete Container")
 # TODO: make the error message about a non-found container more user-friendly
 def delete_container(
-    wallet: str,
+    wallet: WalletInfo,
     cid: str,
     shell: Shell,
     endpoint: str,
@@ -297,7 +282,7 @@
     A wrapper for `frostfs-cli container delete` call.

     Args:
         await_mode: Block execution until container is removed.
-        wallet (str): path to a wallet on whose behalf we delete the container
+        wallet (WalletInfo): wallet on whose behalf we delete the container
         cid (str): ID of the container to delete
         shell: executor for cli command
         endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key

     Returns:
         This function doesn't return anything.
""" - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli.container.delete( - wallet=wallet, cid=cid, rpc_endpoint=endpoint, force=force, @@ -345,26 +329,22 @@ def _parse_cid(output: str) -> str: @reporter.step("Search container by name") def search_container_by_name(name: str, node: ClusterNode): - curl = GenericCli("curl", node.host) - output = curl(f"-I http://127.0.0.1:8084/{name}") - pattern = r"X-Container-Id: (\S+)" - cid = re.findall(pattern, output.stdout) - if cid: - return cid[0] - return None + resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", node.host.config.product) + resolver: BucketContainerResolver = resolver_cls() + return resolver.resolve(node, name) @reporter.step("Search for nodes with a container") def search_nodes_with_container( - wallet: str, + wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, cluster: Cluster, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> list[ClusterNode]: - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) - result = cli.container.search_node(rpc_endpoint=endpoint, wallet=wallet, cid=cid, timeout=timeout) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + result = cli.container.search_node(rpc_endpoint=endpoint, cid=cid, timeout=timeout) pattern = r"[0-9]+(?:\.[0-9]+){3}" nodes_ip = list(set(re.findall(pattern, result.stdout))) diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 610b76a..5fe6054 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -9,9 +9,10 @@ from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.cli.neogo import NeoGo from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE -from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG +from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell import Shell from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.utils import json_utils from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output @@ -20,7 +21,7 @@ logger = logging.getLogger("NeoLogger") @reporter.step("Get object from random node") def get_object_from_random_node( - wallet: str, + wallet: WalletInfo, cid: str, oid: str, shell: Shell, @@ -28,7 +29,6 @@ def get_object_from_random_node( bearer: Optional[str] = None, write_object: Optional[str] = None, xhdr: Optional[dict] = None, - wallet_config: Optional[str] = None, no_progress: bool = True, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, @@ -44,7 +44,6 @@ def get_object_from_random_node( cluster: cluster object bearer (optional, str): path to Bearer Token file, appends to `--bearer` key write_object (optional, str): path to downloaded file, appends to `--file` key - wallet_config(optional, str): path to the wallet config no_progress(optional, bool): do not show progress bar xhdr (optional, dict): Request X-Headers in form of Key=Value session (optional, dict): path to a JSON-encoded container session token @@ -62,7 +61,6 @@ def get_object_from_random_node( bearer, write_object, xhdr, - wallet_config, no_progress, session, timeout, @@ -71,7 +69,7 @@ def get_object_from_random_node( @reporter.step("Get object from {endpoint}") def get_object( - wallet: str, 
+ wallet: WalletInfo, cid: str, oid: str, shell: Shell, @@ -79,7 +77,6 @@ def get_object( bearer: Optional[str] = None, write_object: Optional[str] = None, xhdr: Optional[dict] = None, - wallet_config: Optional[str] = None, no_progress: bool = True, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, @@ -88,14 +85,13 @@ def get_object( GET from FrostFS. Args: - wallet (str): wallet on whose behalf GET is done + wallet (WalletInfo): wallet on whose behalf GET is done cid (str): ID of Container where we get the Object from oid (str): Object ID shell: executor for cli command bearer: path to Bearer Token file, appends to `--bearer` key write_object: path to downloaded file, appends to `--file` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - wallet_config(optional, str): path to the wallet config no_progress(optional, bool): do not show progress bar xhdr (optional, dict): Request X-Headers in form of Key=Value session (optional, dict): path to a JSON-encoded container session token @@ -108,10 +104,9 @@ def get_object( write_object = str(uuid.uuid4()) file_path = os.path.join(ASSETS_DIR, write_object) - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli.object.get( rpc_endpoint=endpoint, - wallet=wallet, cid=cid, oid=oid, file=file_path, @@ -127,14 +122,13 @@ def get_object( @reporter.step("Get Range Hash from {endpoint}") def get_range_hash( - wallet: str, + wallet: WalletInfo, cid: str, oid: str, range_cut: str, shell: Shell, endpoint: str, bearer: Optional[str] = None, - wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, @@ -151,17 +145,15 @@ def get_range_hash( range_cut: Range to take hash from in the form offset1:length1,..., value to pass to the `--range` parameter endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - wallet_config: path to the wallet config xhdr: Request X-Headers in form of Key=Values session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session. timeout: Timeout for the operation. 
Returns: None """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) result = cli.object.hash( rpc_endpoint=endpoint, - wallet=wallet, cid=cid, oid=oid, range=range_cut, @@ -177,7 +169,7 @@ def get_range_hash( @reporter.step("Put object to random node") def put_object_to_random_node( - wallet: str, + wallet: WalletInfo, path: str, cid: str, shell: Shell, @@ -186,7 +178,6 @@ def put_object_to_random_node( copies_number: Optional[int] = None, attributes: Optional[dict] = None, xhdr: Optional[dict] = None, - wallet_config: Optional[str] = None, expire_at: Optional[int] = None, no_progress: bool = True, session: Optional[str] = None, @@ -205,7 +196,6 @@ def put_object_to_random_node( copies_number: Number of copies of the object to store within the RPC call attributes: User attributes in form of Key1=Value1,Key2=Value2 cluster: cluster under test - wallet_config: path to the wallet config no_progress: do not show progress bar expire_at: Last epoch in the life of the object xhdr: Request X-Headers in form of Key=Value @@ -226,7 +216,6 @@ def put_object_to_random_node( copies_number, attributes, xhdr, - wallet_config, expire_at, no_progress, session, @@ -236,7 +225,7 @@ def put_object_to_random_node( @reporter.step("Put object at {endpoint} in container {cid}") def put_object( - wallet: str, + wallet: WalletInfo, path: str, cid: str, shell: Shell, @@ -245,7 +234,6 @@ def put_object( copies_number: Optional[int] = None, attributes: Optional[dict] = None, xhdr: Optional[dict] = None, - wallet_config: Optional[str] = None, expire_at: Optional[int] = None, no_progress: bool = True, session: Optional[str] = None, @@ -263,7 +251,6 @@ def put_object( copies_number: Number of copies of the object to store within the RPC call attributes: User attributes in form of Key1=Value1,Key2=Value2 endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - wallet_config: path to the wallet config no_progress: do not show progress bar expire_at: Last epoch in the life of the object xhdr: Request X-Headers in form of Key=Value @@ -273,10 +260,9 @@ def put_object( (str): ID of uploaded Object """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) result = cli.object.put( rpc_endpoint=endpoint, - wallet=wallet, file=path, cid=cid, attributes=attributes, @@ -297,13 +283,12 @@ def put_object( @reporter.step("Delete object {cid}/{oid} from {endpoint}") def delete_object( - wallet: str, + wallet: WalletInfo, cid: str, oid: str, shell: Shell, endpoint: str, bearer: str = "", - wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, @@ -318,7 +303,6 @@ def delete_object( shell: executor for cli command bearer: path to Bearer Token file, appends to `--bearer` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - wallet_config: path to the wallet config xhdr: Request X-Headers in form of Key=Value session: path to a JSON-encoded container session token timeout: Timeout for the operation. 
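Note on the pattern above: every object helper now builds the CLI from `wallet.config_path`, so the per-call `wallet=` and `wallet_config=` arguments disappear. A minimal caller-side sketch of the migration; all paths, the container ID and the endpoint are hypothetical placeholders, `LocalShell` is assumed to be exported from `frostfs_testlib.shell`, and the WalletInfo field names are taken from their usage in this patch:

    # Sketch only: placeholder paths, CID and endpoint; not values from this patch.
    from frostfs_testlib.shell import LocalShell
    from frostfs_testlib.steps.cli.object import put_object
    from frostfs_testlib.storage.dataclasses.wallet import WalletInfo

    shell = LocalShell()
    # Before: put_object(wallet="/tmp/w.json", ..., wallet_config="/tmp/w.yml")
    # After: the config path travels inside the WalletInfo object.
    wallet = WalletInfo(path="/tmp/w.json", password="", config_path="/tmp/w.yml")
    oid = put_object(
        wallet=wallet,
        path="/tmp/payload.bin",
        cid="11111111111111111111111111111111",  # placeholder container ID
        shell=shell,
        endpoint="127.0.0.1:8080",  # placeholder RPC endpoint
    )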
@@ -326,10 +310,9 @@ def delete_object( (str): Tombstone ID """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) result = cli.object.delete( rpc_endpoint=endpoint, - wallet=wallet, cid=cid, oid=oid, bearer=bearer, @@ -345,13 +328,12 @@ def delete_object( @reporter.step("Get Range") def get_range( - wallet: str, + wallet: WalletInfo, cid: str, oid: str, range_cut: str, shell: Shell, endpoint: str, - wallet_config: Optional[str] = None, bearer: str = "", xhdr: Optional[dict] = None, session: Optional[str] = None, @@ -368,7 +350,6 @@ def get_range( shell: executor for cli command endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key bearer: path to Bearer Token file, appends to `--bearer` key - wallet_config: path to the wallet config xhdr: Request X-Headers in form of Key=Value session: path to a JSON-encoded container session token timeout: Timeout for the operation. @@ -377,10 +358,9 @@ def get_range( """ range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4())) - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli.object.range( rpc_endpoint=endpoint, - wallet=wallet, cid=cid, oid=oid, range=range_cut, @@ -398,7 +378,7 @@ def get_range( @reporter.step("Lock Object") def lock_object( - wallet: str, + wallet: WalletInfo, cid: str, oid: str, shell: Shell, @@ -408,7 +388,6 @@ def lock_object( address: Optional[str] = None, bearer: Optional[str] = None, session: Optional[str] = None, - wallet_config: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, @@ -435,13 +414,12 @@ def lock_object( Lock object ID """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) result = cli.object.lock( rpc_endpoint=endpoint, lifetime=lifetime, expire_at=expire_at, address=address, - wallet=wallet, cid=cid, oid=oid, bearer=bearer, @@ -459,14 +437,13 @@ def lock_object( @reporter.step("Search object") def search_object( - wallet: str, + wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, bearer: str = "", filters: Optional[dict] = None, expected_objects_list: Optional[list] = None, - wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, session: Optional[str] = None, phy: bool = False, @@ -484,7 +461,6 @@ def search_object( endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key filters: key=value pairs to filter Objects expected_objects_list: a list of ObjectIDs to compare found Objects with - wallet_config: path to the wallet config xhdr: Request X-Headers in form of Key=Value session: path to a JSON-encoded container session token phy: Search physically stored objects. 
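Dropping the explicit wallet arguments works because `wallet.config_path` points to a small YAML file carrying both the wallet path and the password; the layout matches what `_get_cli` and `WalletFactory` write later in this series. A sketch of producing such a file by hand (paths are placeholders):

    # Sketch: writes the kind of config FrostfsCli receives via wallet.config_path.
    import yaml

    with open("/tmp/w.yml", "w") as config_file:
        config_file.write(yaml.dump({"wallet": "/tmp/w.json", "password": ""}))
    # FrostfsCli(shell, FROSTFS_CLI_EXEC, "/tmp/w.yml") then reads the wallet path
    # and password from this file instead of taking them on every call.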
@@ -495,10 +471,9 @@ def search_object( list of found ObjectIDs """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) result = cli.object.search( rpc_endpoint=endpoint, - wallet=wallet, cid=cid, bearer=bearer, xhdr=xhdr, @@ -513,23 +488,18 @@ def search_object( if expected_objects_list: if sorted(found_objects) == sorted(expected_objects_list): - logger.info( - f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'" - ) + logger.info(f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'") else: - logger.warning( - f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'" - ) + logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'") return found_objects @reporter.step("Get netmap netinfo") def get_netmap_netinfo( - wallet: str, + wallet: WalletInfo, shell: Shell, endpoint: str, - wallet_config: Optional[str] = None, address: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -539,7 +509,7 @@ def get_netmap_netinfo( Get netmap netinfo output from node Args: - wallet (str): wallet on whose behalf request is done + wallet (WalletInfo): wallet on whose behalf request is done shell: executor for cli command endpoint (optional, str): FrostFS endpoint to send request to, appends to `--rpc-endpoint` key address: Address of wallet account @@ -552,9 +522,8 @@ def get_netmap_netinfo( (dict): dict of parsed command output """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) output = cli.netmap.netinfo( - wallet=wallet, rpc_endpoint=endpoint, address=address, ttl=ttl, @@ -578,7 +547,7 @@ def get_netmap_netinfo( @reporter.step("Head object") def head_object( - wallet: str, + wallet: WalletInfo, cid: str, oid: str, shell: Shell, @@ -588,7 +557,6 @@ def head_object( json_output: bool = True, is_raw: bool = False, is_direct: bool = False, - wallet_config: Optional[str] = None, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ): @@ -596,7 +564,7 @@ def head_object( HEAD an Object. Args: - wallet (str): wallet on whose behalf HEAD is done + wallet (WalletInfo): wallet on whose behalf HEAD is done cid (str): ID of Container where we get the Object from oid (str): ObjectID to HEAD shell: executor for cli command @@ -608,7 +576,6 @@ def head_object( turns into `--raw` key is_direct(optional, bool): send request directly to the node or not; this flag turns into `--ttl 1` key - wallet_config(optional, str): path to the wallet config xhdr (optional, dict): Request X-Headers in form of Key=Value session (optional, dict): path to a JSON-encoded container session token timeout: Timeout for the operation. 
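`get_netmap_netinfo` follows the same migration; a usage sketch under the same placeholder assumptions as the `put_object` example above:

    # Sketch: `wallet` and `shell` as in the put_object example; endpoint is a placeholder.
    from frostfs_testlib.steps.cli.object import get_netmap_netinfo

    net_info = get_netmap_netinfo(
        wallet=wallet,
        shell=shell,
        endpoint="127.0.0.1:8080",
    )
    print(net_info)  # dict parsed from `frostfs-cli netmap netinfo` output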
@@ -619,10 +586,9 @@ def head_object( (str): HEAD response as a plain text """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) result = cli.object.head( rpc_endpoint=endpoint, - wallet=wallet, cid=cid, oid=oid, bearer=bearer, @@ -673,7 +639,7 @@ def head_object( @reporter.step("Run neo-go dump-keys") -def neo_go_dump_keys(shell: Shell, wallet: str) -> dict: +def neo_go_dump_keys(shell: Shell, wallet: WalletInfo) -> dict: """ Run neo-go dump keys command @@ -761,9 +727,7 @@ def get_object_nodes( parsing_output = parse_cmd_table(result_object_nodes.stdout, "|") list_object_nodes = [ - node - for node in parsing_output - if node["should_contain_object"] == "true" and node["actually_contains_object"] == "true" + node for node in parsing_output if node["should_contain_object"] == "true" and node["actually_contains_object"] == "true" ] netmap_nodes_list = parse_netmap_output( @@ -780,10 +744,7 @@ def get_object_nodes( ] result = [ - cluster_node - for netmap_node in netmap_nodes - for cluster_node in cluster.cluster_nodes - if netmap_node.node == cluster_node.host_ip + cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip ] return result diff --git a/src/frostfs_testlib/steps/complex_object_actions.py b/src/frostfs_testlib/steps/complex_object_actions.py index a67dd4c..e1a7088 100644 --- a/src/frostfs_testlib/steps/complex_object_actions.py +++ b/src/frostfs_testlib/steps/complex_object_actions.py @@ -14,11 +14,11 @@ from typing import Optional, Tuple from frostfs_testlib import reporter from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import head_object from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo logger = logging.getLogger("NeoLogger") @@ -44,7 +44,7 @@ def get_storage_object_chunks( with reporter.step(f"Get complex object chunks (f{storage_object.oid})"): split_object_id = get_link_object( - storage_object.wallet_file_path, + storage_object.wallet, storage_object.cid, storage_object.oid, shell, @@ -53,7 +53,7 @@ def get_storage_object_chunks( timeout=timeout, ) head = head_object( - storage_object.wallet_file_path, + storage_object.wallet, storage_object.cid, split_object_id, shell, @@ -96,7 +96,7 @@ def get_complex_object_split_ranges( chunks_ids = get_storage_object_chunks(storage_object, shell, cluster) for chunk_id in chunks_ids: head = head_object( - storage_object.wallet_file_path, + storage_object.wallet, storage_object.cid, chunk_id, shell, @@ -114,13 +114,12 @@ def get_complex_object_split_ranges( @reporter.step("Get Link Object") def get_link_object( - wallet: str, + wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode], bearer: str = "", - wallet_config: str = DEFAULT_WALLET_CONFIG, is_direct: bool = True, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ): @@ -154,7 +153,6 @@ def get_link_object( is_raw=True, is_direct=is_direct, bearer=bearer, - wallet_config=wallet_config, timeout=timeout, ) if resp["link"]: @@ -167,7 +165,7 @@ def get_link_object( @reporter.step("Get Last Object") def get_last_object( - wallet: str, + wallet: WalletInfo, cid: str, oid: str, 
shell: Shell, diff --git a/src/frostfs_testlib/steps/epoch.py b/src/frostfs_testlib/steps/epoch.py index ef8f85a..ce7ed12 100644 --- a/src/frostfs_testlib/steps/epoch.py +++ b/src/frostfs_testlib/steps/epoch.py @@ -4,13 +4,7 @@ from typing import Optional from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo -from frostfs_testlib.resources.cli import ( - CLI_DEFAULT_TIMEOUT, - FROSTFS_ADM_CONFIG_PATH, - FROSTFS_ADM_EXEC, - FROSTFS_CLI_EXEC, - NEOGO_EXECUTABLE, -) +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.steps.payment_neogo import get_contract_hash diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index f717fd4..baf362b 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -10,6 +10,7 @@ from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo logger = logging.getLogger("NeoLogger") @@ -28,9 +29,7 @@ def check_objects_in_bucket( assert bucket_object in bucket_objects, f"Expected object {bucket_object} in objects list {bucket_objects}" for bucket_object in unexpected_objects: - assert ( - bucket_object not in bucket_objects - ), f"Expected object {bucket_object} not in objects list {bucket_objects}" + assert bucket_object not in bucket_objects, f"Expected object {bucket_object} not in objects list {bucket_objects}" @reporter.step("Try to get object and got error") @@ -58,9 +57,7 @@ def object_key_from_file_path(full_path: str) -> str: return os.path.basename(full_path) -def assert_tags( - actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None -) -> None: +def assert_tags(actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None) -> None: expected_tags = [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else [] unexpected_tags = [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else [] if expected_tags == []: @@ -180,7 +177,7 @@ def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): def search_nodes_with_bucket( cluster: Cluster, bucket_name: str, - wallet: str, + wallet: WalletInfo, shell: Shell, endpoint: str, ) -> list[ClusterNode]: diff --git a/src/frostfs_testlib/steps/session_token.py b/src/frostfs_testlib/steps/session_token.py index 6c87cac..67c556d 100644 --- a/src/frostfs_testlib/steps/session_token.py +++ b/src/frostfs_testlib/steps/session_token.py @@ -4,13 +4,12 @@ import logging import os import uuid from dataclasses import dataclass -from enum import Enum from typing import Any, Optional from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC -from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG +from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell import Shell from frostfs_testlib.storage.dataclasses.storage_object_info import 
StorageObjectInfo from frostfs_testlib.storage.dataclasses.wallet import WalletInfo @@ -231,8 +230,7 @@ def get_object_signed_token( def create_session_token( shell: Shell, owner: str, - wallet_path: str, - wallet_password: str, + wallet: WalletInfo, rpc_endpoint: str, ) -> str: """ @@ -247,19 +245,18 @@ def create_session_token( The path to the generated session token file. """ session_token = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC) + frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) frostfscli.session.create( rpc_endpoint=rpc_endpoint, address=owner, - wallet=wallet_path, - wallet_password=wallet_password, out=session_token, + wallet=wallet.path, ) return session_token @reporter.step("Sign Session Token") -def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) -> str: +def sign_session_token(shell: Shell, session_token_file: str, wallet: WalletInfo) -> str: """ This function signs the session token by the given wallet. @@ -272,6 +269,6 @@ def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) - The path to the signed token. """ signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG) - frostfscli.util.sign_session_token(wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file) + frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + frostfscli.util.sign_session_token(session_token_file, signed_token_file) return signed_token_file diff --git a/src/frostfs_testlib/steps/storage_object.py b/src/frostfs_testlib/steps/storage_object.py index ce1bb94..4b4b2a6 100644 --- a/src/frostfs_testlib/steps/storage_object.py +++ b/src/frostfs_testlib/steps/storage_object.py @@ -30,14 +30,14 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, clust with reporter.step("Delete objects"): for storage_object in storage_objects: storage_object.tombstone = delete_object( - storage_object.wallet_file_path, + storage_object.wallet, storage_object.cid, storage_object.oid, shell=shell, endpoint=cluster.default_rpc_endpoint, ) verify_head_tombstone( - wallet_path=storage_object.wallet_file_path, + wallet=storage_object.wallet, cid=storage_object.cid, oid_ts=storage_object.tombstone, oid=storage_object.oid, @@ -52,7 +52,7 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, clust for storage_object in storage_objects: with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED): get_object( - storage_object.wallet_file_path, + storage_object.wallet, storage_object.cid, storage_object.oid, shell=shell, diff --git a/src/frostfs_testlib/steps/storage_policy.py b/src/frostfs_testlib/steps/storage_policy.py index d2202a4..acc113f 100644 --- a/src/frostfs_testlib/steps/storage_policy.py +++ b/src/frostfs_testlib/steps/storage_policy.py @@ -12,13 +12,15 @@ from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import head_object from frostfs_testlib.steps.complex_object_actions import get_last_object from frostfs_testlib.storage.cluster import StorageNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.utils import string_utils logger = logging.getLogger("NeoLogger") +# TODO: Unused, remove or make use of @reporter.step("Get Object Copies") -def get_object_copies(complexity: str, wallet: 
str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: +def get_object_copies(complexity: str, wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: """ The function performs requests to all nodes of the container and finds out if they store a copy of the object. The procedure is @@ -43,7 +45,7 @@ def get_object_copies(complexity: str, wallet: str, cid: str, oid: str, shell: S @reporter.step("Get Simple Object Copies") -def get_simple_object_copies(wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: +def get_simple_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: """ To figure out the number of a simple object copies, only direct HEAD requests should be made to the every node of the container. @@ -72,7 +74,7 @@ def get_simple_object_copies(wallet: str, cid: str, oid: str, shell: Shell, node @reporter.step("Get Complex Object Copies") -def get_complex_object_copies(wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: +def get_complex_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: """ To figure out the number of a complex object copies, we firstly need to retrieve its Last object. We consider that the number of @@ -109,8 +111,7 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN nodes_list = [] for node in nodes: - wallet = node.get_wallet_path() - wallet_config = node.get_wallet_config_path() + wallet = WalletInfo.from_node(node) try: res = head_object( wallet, @@ -119,7 +120,6 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True, - wallet_config=wallet_config, ) if res is not None: logger.info(f"Found object {oid} on node {node}") @@ -131,9 +131,7 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN @reporter.step("Get Nodes Without Object") -def get_nodes_without_object( - wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] -) -> list[StorageNode]: +def get_nodes_without_object(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]: """ The function returns list of nodes which do not store the given object. 
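The practical effect of `WalletInfo.from_node` in the hunk above is that a per-node wallet is obtained in one step instead of carrying the wallet path and config path separately. A hedged sketch of counting replicas with the updated helpers; `cluster` is assumed to be the usual Cluster object from the test fixtures, and the IDs are placeholders:

    # Sketch: assumes a Cluster instance named `cluster`; CID/OID are placeholders.
    from frostfs_testlib.steps.storage_policy import get_simple_object_copies
    from frostfs_testlib.storage.dataclasses.wallet import WalletInfo

    wallet = WalletInfo.from_node(cluster.storage_nodes[0])
    copies = get_simple_object_copies(
        wallet,
        cid="11111111111111111111111111111111",
        oid="22222222222222222222222222222222",
        shell=shell,
        nodes=cluster.storage_nodes,
    )
    assert copies == 2  # e.g. under a REP 2 placement policy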
diff --git a/src/frostfs_testlib/steps/tombstone.py b/src/frostfs_testlib/steps/tombstone.py index b468c93..27f75d5 100644 --- a/src/frostfs_testlib/steps/tombstone.py +++ b/src/frostfs_testlib/steps/tombstone.py @@ -1,31 +1,23 @@ -import json import logging -from neo3.wallet import wallet - from frostfs_testlib import reporter from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import head_object +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo logger = logging.getLogger("NeoLogger") @reporter.step("Verify Head Tombstone") -def verify_head_tombstone(wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str): - header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"] +def verify_head_tombstone(wallet: WalletInfo, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str): + header = head_object(wallet, cid, oid_ts, shell=shell, endpoint=endpoint)["header"] s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"] logger.info(f"Header Session OIDs is {s_oid}") logger.info(f"OID is {oid}") assert header["containerID"] == cid, "Tombstone Header CID is wrong" - - with open(wallet_path, "r") as file: - wlt_data = json.loads(file.read()) - wlt = wallet.Wallet.from_json(wlt_data, password="") - addr = wlt.accounts[0].address - - assert header["ownerID"] == addr, "Tombstone Owner ID is wrong" + assert header["ownerID"] == wallet.get_address_from_json(0), "Tombstone Owner ID is wrong" assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone" assert header["sessionToken"]["body"]["object"]["verb"] == "DELETE", "Header Session Type isn't DELETE" assert header["sessionToken"]["body"]["object"]["target"]["container"] == cid, "Header Session ID is wrong" diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index 5f2ed99..e713f02 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -1,6 +1,5 @@ import copy from datetime import datetime -from typing import Optional import frostfs_testlib.resources.optionals as optionals from frostfs_testlib import reporter @@ -10,7 +9,6 @@ from frostfs_testlib.load.load_report import LoadReport from frostfs_testlib.load.load_verifiers import LoadVerifier from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.parallel import parallel from frostfs_testlib.testing.test_control import run_optionally @@ -23,7 +21,6 @@ class BackgroundLoadController: cluster_nodes: list[ClusterNode] nodes_under_load: list[ClusterNode] load_counter: int - loaders_wallet: WalletInfo load_summaries: dict endpoints: list[str] runner: ScenarioRunner @@ -34,7 +31,6 @@ class BackgroundLoadController: self, k6_dir: str, load_params: LoadParams, - loaders_wallet: WalletInfo, cluster_nodes: list[ClusterNode], nodes_under_load: list[ClusterNode], runner: ScenarioRunner, @@ -45,7 +41,6 @@ class BackgroundLoadController: self.cluster_nodes = cluster_nodes self.nodes_under_load = nodes_under_load self.load_counter = 1 - self.loaders_wallet = loaders_wallet self.runner = runner self.started = False self.load_reporters = [] @@ -64,10 +59,7 @@ class BackgroundLoadController: ) ), 
EndpointSelectionStrategy.FIRST: list( - set( - node_under_load.service(StorageNode).get_rpc_endpoint() - for node_under_load in self.nodes_under_load - ) + set(node_under_load.service(StorageNode).get_rpc_endpoint() for node_under_load in self.nodes_under_load) ), }, # for some reason xk6 appends http protocol on its own diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 69df675..9e07914 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -11,12 +11,13 @@ from frostfs_testlib.healthcheck.interfaces import Healthcheck from frostfs_testlib.hosting.interfaces import HostStatus from frostfs_testlib.plugins import load_all from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC -from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, MORPH_BLOCK_TIME +from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider from frostfs_testlib.steps.network import IpHelper from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import parallel from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success from frostfs_testlib.utils.datetime_utils import parse_time @@ -413,12 +414,12 @@ class ClusterStateController: frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}") @reporter.step("Set mode node to {status}") - def set_mode_node(self, cluster_node: ClusterNode, wallet: str, status: str, await_tick: bool = True) -> None: + def set_mode_node(self, cluster_node: ClusterNode, wallet: WalletInfo, status: str, await_tick: bool = True) -> None: rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint() control_endpoint = cluster_node.service(StorageNode).get_control_endpoint() - frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(local_shell=self.shell, cluster_node=cluster_node) - node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint=rpc_endpoint, wallet=wallet).stdout) + frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(local_shell=self.shell, local_wallet=wallet, cluster_node=cluster_node) + node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint=rpc_endpoint).stdout) with reporter.step("If status maintenance, then check that the option is enabled"): if node_netinfo.maintenance_mode_allowed == "false": @@ -437,12 +438,10 @@ class ClusterStateController: self.check_node_status(status=status, wallet=wallet, cluster_node=cluster_node) @wait_for_success(80, 8, title="Wait for storage status become {status}") - def check_node_status(self, status: str, wallet: str, cluster_node: ClusterNode): - frostfs_cli = FrostfsCli( - shell=self.shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG - ) + def check_node_status(self, status: str, wallet: WalletInfo, cluster_node: ClusterNode): + frostfs_cli = FrostfsCli(self.shell, FROSTFS_CLI_EXEC, wallet.config_path) netmap = NetmapParser.snapshot_all_nodes( - 
frostfs_cli.netmap.snapshot(rpc_endpoint=cluster_node.storage_node.get_rpc_endpoint(), wallet=wallet).stdout + frostfs_cli.netmap.snapshot(rpc_endpoint=cluster_node.storage_node.get_rpc_endpoint()).stdout ) netmap = [node for node in netmap if cluster_node.host_ip == node.node] if status == "offline": @@ -450,7 +449,9 @@ class ClusterStateController: else: assert netmap[0].node_status == status.upper(), f"Node state - {netmap[0].node_status} != {status} expect" - def _get_cli(self, local_shell: Shell, cluster_node: ClusterNode) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]: + def _get_cli( + self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode + ) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]: # TODO Move to service config host = cluster_node.host service_config = host.get_service_config(cluster_node.storage_node.name) @@ -462,12 +463,8 @@ class ClusterStateController: wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") - frostfs_adm = FrostfsAdm( - shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH - ) - frostfs_cli = FrostfsCli( - shell=local_shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG - ) + frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) + frostfs_cli = FrostfsCli(local_shell, FROSTFS_CLI_EXEC, local_wallet.config_path) frostfs_cli_remote = FrostfsCli( shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, @@ -511,9 +508,7 @@ class ClusterStateController: options = CommandOptions(check=False) return self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code - @retry( - max_attempts=60, sleep_interval=10, expected_result=HostStatus.ONLINE, title="Waiting for {node} to go online" - ) + @retry(max_attempts=60, sleep_interval=10, expected_result=HostStatus.ONLINE, title="Waiting for {node} to go online") def _wait_for_host_online(self, node: ClusterNode): try: ping_result = self._ping_host(node) @@ -524,9 +519,7 @@ class ClusterStateController: logger.warning(f"Host ping fails with error {err}") return HostStatus.OFFLINE - @retry( - max_attempts=60, sleep_interval=10, expected_result=HostStatus.OFFLINE, title="Waiting for {node} to go offline" - ) + @retry(max_attempts=60, sleep_interval=10, expected_result=HostStatus.OFFLINE, title="Waiting for {node} to go offline") def _wait_for_host_offline(self, node: ClusterNode): try: ping_result = self._ping_host(node) diff --git a/src/frostfs_testlib/storage/dataclasses/acl.py b/src/frostfs_testlib/storage/dataclasses/acl.py index 1330618..362dee9 100644 --- a/src/frostfs_testlib/storage/dataclasses/acl.py +++ b/src/frostfs_testlib/storage/dataclasses/acl.py @@ -1,8 +1,8 @@ import logging from dataclasses import dataclass -from enum import Enum from typing import Any, Dict, List, Optional, Union +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.readable import HumanReadableEnum from frostfs_testlib.utils import wallet_utils @@ -65,11 +65,7 @@ class EACLFilters: def __str__(self): return ",".join( - [ - f"{filter.header_type.value}:" - f"{filter.key}{filter.match_type.value}{filter.value}" - for filter in self.filters - ] + [f"{filter.header_type.value}:" f"{filter.key}{filter.match_type.value}{filter.value}" for filter in self.filters] if self.filters else [] ) @@ -84,7 +80,7 @@ class EACLPubKey: class EACLRule: operation: 
Optional[EACLOperation] = None access: Optional[EACLAccess] = None - role: Optional[Union[EACLRole, str]] = None + role: Optional[Union[EACLRole, WalletInfo]] = None filters: Optional[EACLFilters] = None def to_dict(self) -> Dict[str, Any]: @@ -96,9 +92,9 @@ class EACLRule: } def __str__(self): - role = ( - self.role.value - if isinstance(self.role, EACLRole) - else f'pubkey:{wallet_utils.get_wallet_public_key(self.role, "")}' - ) + role = "" + if isinstance(self.role, EACLRole): + role = self.role.value + if isinstance(self.role, WalletInfo): + role = f"pubkey:{wallet_utils.get_wallet_public_key(self.role.path, self.role.password)}" return f'{self.access.value} {self.operation.value} {self.filters or ""} {role}' diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index 63a3cf2..f4d729d 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -1,6 +1,7 @@ from dataclasses import dataclass from typing import Optional +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.readable import HumanReadableEnum @@ -19,7 +20,7 @@ class LockObjectInfo(ObjectRef): @dataclass class StorageObjectInfo(ObjectRef): size: Optional[int] = None - wallet_file_path: Optional[str] = None + wallet: Optional[WalletInfo] = None file_path: Optional[str] = None file_hash: Optional[str] = None attributes: Optional[list[dict[str, str]]] = None diff --git a/src/frostfs_testlib/storage/dataclasses/wallet.py b/src/frostfs_testlib/storage/dataclasses/wallet.py index 1d66c4b..d053d29 100644 --- a/src/frostfs_testlib/storage/dataclasses/wallet.py +++ b/src/frostfs_testlib/storage/dataclasses/wallet.py @@ -1,13 +1,15 @@ import json import logging import os -import uuid from dataclasses import dataclass from typing import Optional -from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS +import yaml + +from frostfs_testlib import reporter +from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.cluster import Cluster, NodeBase +from frostfs_testlib.storage.cluster import NodeBase from frostfs_testlib.utils.wallet_utils import get_last_address_from_wallet, init_wallet logger = logging.getLogger("frostfs.testlib.utils") @@ -21,9 +23,13 @@ class WalletInfo: @staticmethod def from_node(node: NodeBase): - return WalletInfo( - node.get_wallet_path(), node.get_wallet_password(), node.get_wallet_config_path() - ) + wallet_path = node.get_wallet_path() + wallet_password = node.get_wallet_password() + wallet_config_file = os.path.join(ASSETS_DIR, os.path.basename(node.get_wallet_config_path())) + with open(wallet_config_file, "w") as file: + file.write(yaml.dump({"wallet": wallet_path, "password": wallet_password})) + + return WalletInfo(wallet_path, wallet_password, wallet_config_file) def get_address(self) -> str: """ @@ -47,22 +53,17 @@ class WalletInfo: """ with open(self.path, "r") as wallet: wallet_json = json.load(wallet) - assert abs(account_id) + 1 <= len( - wallet_json["accounts"] - ), f"There is no index '{account_id}' in wallet: {wallet_json}" + assert abs(account_id) + 1 <= len(wallet_json["accounts"]), f"There is no index '{account_id}' in wallet: {wallet_json}" return wallet_json["accounts"][account_id]["address"] class WalletFactory: - def __init__(self, 
wallets_dir: str, shell: Shell, cluster: Cluster) -> None: + def __init__(self, wallets_dir: str, shell: Shell) -> None: self.shell = shell self.wallets_dir = wallets_dir - self.cluster = cluster - def create_wallet( - self, file_name: Optional[str] = None, password: Optional[str] = None - ) -> WalletInfo: + def create_wallet(self, file_name: str, password: Optional[str] = None) -> WalletInfo: """ Creates new default wallet. @@ -74,8 +75,6 @@ class WalletFactory: WalletInfo object of new wallet. """ - if file_name is None: - file_name = str(uuid.uuid4()) if password is None: password = "" @@ -85,6 +84,8 @@ class WalletFactory: init_wallet(wallet_path, password) with open(wallet_config_path, "w") as config_file: - config_file.write(f'password: "{password}"') + config_file.write(f'wallet: {wallet_path}\npassword: "{password}"') + + reporter.attach(wallet_path, os.path.basename(wallet_path)) return WalletInfo(wallet_path, password, wallet_config_path) diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index 2c1f4ab..91b1d98 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -4,7 +4,6 @@ import re from frostfs_testlib.cli import FrostfsAdm, FrostfsCli from frostfs_testlib.hosting import Host, Hosting from frostfs_testlib.resources.cli import FROSTFS_ADM_EXEC, FROSTFS_AUTHMATE_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE -from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell from frostfs_testlib.testing.parallel import parallel @@ -18,7 +17,7 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]: out = shell.exec(f"{binary} --version").stdout versions[binary] = _parse_version(out) - frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC) versions[FROSTFS_CLI_EXEC] = _parse_version(frostfs_cli.version.get().stdout) try: @@ -36,7 +35,7 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]: def parallel_binary_verions(host: Host) -> dict[str, str]: versions_by_host = {} - + binary_path_by_name = {} # Maps binary name to executable path for service_config in host.config.services: exec_path = service_config.attributes.get("exec_path") @@ -65,7 +64,7 @@ def parallel_binary_verions(host: Host) -> dict[str, str]: versions_at_host[binary_name] = {"version": "Unknown", "check": binary["check"]} versions_by_host[host.config.address] = versions_at_host return versions_by_host - + def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: versions_by_host = {} @@ -83,26 +82,27 @@ def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: for host, binary_versions in versions_by_host.items(): for name, binary in binary_versions.items(): version = binary["version"] - if not cheak_versions.get(f'{name[:-2]}', None): - captured_version = cheak_versions.get(f'{name[:-2]}',{}).get(host, {}).get(captured_version) - cheak_versions[f'{name[:-2]}'] = {host: {version: name}} + if not cheak_versions.get(f"{name[:-2]}", None): + captured_version = cheak_versions.get(f"{name[:-2]}", {}).get(host, {}).get(captured_version) + cheak_versions[f"{name[:-2]}"] = {host: {version: name}} else: - captured_version = list(cheak_versions.get(f'{name[:-2]}',{}).get(previous_host).keys())[0] - cheak_versions[f'{name[:-2]}'].update({host:{version:name}}) - + captured_version = list(cheak_versions.get(f"{name[:-2]}", {}).get(previous_host).keys())[0] + 
cheak_versions[f"{name[:-2]}"].update({host: {version: name}}) + if captured_version and captured_version != version: exception.add(name[:-2]) - + versions[name] = {"version": version, "check": binary["check"]} previous_host = host if exception: for i in exception: for host in versions_by_host.keys(): for version, name in cheak_versions.get(i).get(host).items(): - exсeptions.append(f'Binary {name} has inconsistent version {version} on host {host}') - exсeptions.append('\n') + exсeptions.append(f"Binary {name} has inconsistent version {version} on host {host}") + exсeptions.append("\n") return versions, exсeptions + def _parse_version(version_output: str) -> str: version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE) return version.group(1).strip() if version else version_output diff --git a/src/frostfs_testlib/utils/wallet_utils.py b/src/frostfs_testlib/utils/wallet_utils.py index 0c5ab1a..d2b4229 100644 --- a/src/frostfs_testlib/utils/wallet_utils.py +++ b/src/frostfs_testlib/utils/wallet_utils.py @@ -9,6 +9,16 @@ from neo3.wallet import wallet as neo3_wallet logger = logging.getLogger("frostfs.testlib.utils") +def __fix_wallet_schema(wallet: dict) -> None: + # Temporary function to fix wallets that do not conform to the schema + # TODO: get rid of it once issue is solved + if "name" not in wallet: + wallet["name"] = None + for account in wallet["accounts"]: + if "extra" not in account: + account["extra"] = None + + def init_wallet(wallet_path: str, wallet_password: str): """ Create new wallet and new account. @@ -33,29 +43,15 @@ def get_last_address_from_wallet(wallet_path: str, wallet_password: str): Returns: The address for the wallet. """ - with open(wallet_path) as wallet_file: - wallet = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wallet_password) + wallet = load_wallet(wallet_path, wallet_password) address = wallet.accounts[-1].address logger.info(f"got address: {address}") return address def get_wallet_public_key(wallet_path: str, wallet_password: str, format: str = "hex") -> str: - def __fix_wallet_schema(wallet: dict) -> None: - # Temporary function to fix wallets that do not conform to the schema - # TODO: get rid of it once issue is solved - if "name" not in wallet: - wallet["name"] = None - for account in wallet["accounts"]: - if "extra" not in account: - account["extra"] = None - - # Get public key from wallet file - with open(wallet_path, "r") as file: - wallet_content = json.load(file) - __fix_wallet_schema(wallet_content) - wallet_from_json = neo3_wallet.Wallet.from_json(wallet_content, password=wallet_password) - public_key_hex = str(wallet_from_json.accounts[0].public_key) + wallet = load_wallet(wallet_path, wallet_password) + public_key_hex = str(wallet.accounts[0].public_key) # Convert public key to specified format if format == "hex": @@ -69,7 +65,9 @@ def get_wallet_public_key(wallet_path: str, wallet_password: str, format: str = raise ValueError(f"Invalid public key format: {format}") -def load_wallet(path: str, passwd: str = "") -> neo3_wallet.Wallet: - with open(path, "r") as wallet_file: - wlt_data = wallet_file.read() - return neo3_wallet.Wallet.from_json(json.loads(wlt_data), password=passwd) +def load_wallet(wallet_path: str, wallet_password: str) -> neo3_wallet.Wallet: + with open(wallet_path) as wallet_file: + wallet_content = json.load(wallet_file) + + __fix_wallet_schema(wallet_content) + return neo3_wallet.Wallet.from_json(wallet_content, password=wallet_password) From b323bcfd0ada57d1652fb44ec95a7cc43eaf64ba Mon Sep 
17 00:00:00 2001 From: Andrey Berezin Date: Thu, 14 Mar 2024 14:27:31 +0300 Subject: [PATCH 129/274] [#192] Fix param --- src/frostfs_testlib/credentials/wallet_factory_provider.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/credentials/wallet_factory_provider.py b/src/frostfs_testlib/credentials/wallet_factory_provider.py index 4d1ab7a..d00020f 100644 --- a/src/frostfs_testlib/credentials/wallet_factory_provider.py +++ b/src/frostfs_testlib/credentials/wallet_factory_provider.py @@ -10,5 +10,5 @@ class WalletFactoryProvider(GrpcCredentialsProvider): @reporter.step("Init gRPC Credentials using wallet generation") def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo: wallet_factory = WalletFactory(ASSETS_DIR, LocalShell()) - user.wallet = wallet_factory.create_wallet(file_name=user, password=DEFAULT_WALLET_PASS) + user.wallet = wallet_factory.create_wallet(file_name=user.name, password=DEFAULT_WALLET_PASS) return user.wallet From 0e247c2ff26327d1af03f2d21da2ff843888e694 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 14 Mar 2024 16:39:20 +0300 Subject: [PATCH 130/274] [#193] Fix auth provider Signed-off-by: Andrey Berezin --- src/frostfs_testlib/credentials/authmate_s3_provider.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/credentials/authmate_s3_provider.py b/src/frostfs_testlib/credentials/authmate_s3_provider.py index 6343b5a..66c5015 100644 --- a/src/frostfs_testlib/credentials/authmate_s3_provider.py +++ b/src/frostfs_testlib/credentials/authmate_s3_provider.py @@ -22,7 +22,7 @@ class AuthmateS3CredentialsProvider(S3CredentialsProvider): gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes] # unique short bucket name - bucket = f"bucket_{hex(int(datetime.now().timestamp()*1000000))}" + bucket = f"bucket-{hex(int(datetime.now().timestamp()*1000000))}" frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC) issue_secret_output = frostfs_authmate.secret.issue( @@ -40,7 +40,7 @@ class AuthmateS3CredentialsProvider(S3CredentialsProvider): ) cid = str(re.search(r"container_id.*:\s.(?P\w*)", issue_secret_output).group("container_id")) - containers_list = list_containers(wallet.path, shell, endpoint) + containers_list = list_containers(wallet, shell, endpoint) assert cid in containers_list, f"Expected cid {cid} in {containers_list}" user.s3_credentials = S3Credentials(aws_access_key_id, aws_secret_access_key) From f2bded64e4a7672ae038b09544567830cc76a81f Mon Sep 17 00:00:00 2001 From: Liza Date: Mon, 4 Mar 2024 17:01:24 +0300 Subject: [PATCH 131/274] [#189] Add setup step to check binaries versions Signed-off-by: Liza --- src/frostfs_testlib/utils/version_utils.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index 91b1d98..f1b7e37 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -1,6 +1,7 @@ import logging import re +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli from frostfs_testlib.hosting import Host, Hosting from frostfs_testlib.resources.cli import FROSTFS_ADM_EXEC, FROSTFS_AUTHMATE_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE @@ -10,6 +11,7 @@ from frostfs_testlib.testing.parallel import parallel logger = logging.getLogger("NeoLogger") +@reporter.step("Get local binaries versions") def 
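# --- illustrative aside (not part of the patch) -----------------------------
# On the bucket-name change in the authmate provider above: S3 bucket names
# must be DNS-compatible (lowercase letters, digits, dots, hyphens), so the
# underscore had to become a hyphen. The uniqueness trick restated standalone:
from datetime import datetime

bucket = f"bucket-{hex(int(datetime.now().timestamp() * 1_000_000))}"
assert "_" not in bucket and bucket.startswith("bucket-")
# -----------------------------------------------------------------------------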
get_local_binaries_versions(shell: Shell) -> dict[str, str]: versions = {} @@ -29,6 +31,7 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]: out = shell.exec("aws --version").stdout out_lines = out.split("\n") versions["AWS"] = out_lines[0] if out_lines else "Unknown" + logger.info(f"Local binaries version: {out_lines[0]}") return versions @@ -66,6 +69,7 @@ def parallel_binary_verions(host: Host) -> dict[str, str]: return versions_by_host +@reporter.step("Get remote binaries versions") def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: versions_by_host = {} future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts) @@ -94,6 +98,9 @@ def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: versions[name] = {"version": version, "check": binary["check"]} previous_host = host + logger.info( + "Remote binaries versions:\n" + "\n".join([f"{key} ver: {value['version']}" for key, value in versions.items()]) + ) if exception: for i in exception: for host in versions_by_host.keys(): From 9c508c4f66f615c44f911f4de57492fc8e880ad6 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 15 Mar 2024 17:44:18 +0300 Subject: [PATCH 132/274] [#194] Fix shards watcher CLI usage Signed-off-by: Andrey Berezin --- src/frostfs_testlib/storage/controllers/shards_watcher.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/shards_watcher.py b/src/frostfs_testlib/storage/controllers/shards_watcher.py index ad07ff4..3d313f1 100644 --- a/src/frostfs_testlib/storage/controllers/shards_watcher.py +++ b/src/frostfs_testlib/storage/controllers/shards_watcher.py @@ -79,9 +79,7 @@ class ShardsWatcher: assert self._is_shard_present(shard_id) shards_with_new_errors = self.get_shards_with_new_errors() - assert ( - shard_id in shards_with_new_errors - ), f"Expected shard {shard_id} to have new errors, but haven't {self.shards_snapshots[-1]}" + assert shard_id in shards_with_new_errors, f"Expected shard {shard_id} to have new errors, but haven't {self.shards_snapshots[-1]}" @wait_for_success(300, 5) def await_for_shards_have_no_new_errors(self): @@ -97,6 +95,8 @@ class ShardsWatcher: response = shards_cli.list( endpoint=self.storage_node.get_control_endpoint(), + wallet=self.storage_node.get_remote_wallet_path(), + wallet_password=self.storage_node.get_wallet_password(), json_mode=True, ) @@ -109,6 +109,8 @@ class ShardsWatcher: ) return shards_cli.set_mode( endpoint=self.storage_node.get_control_endpoint(), + wallet=self.storage_node.get_remote_wallet_path(), + wallet_password=self.storage_node.get_wallet_password(), mode=mode, id=[shard_id], clear_errors=clear_errors, From 11487e983da6e1cc2fae8ea266dac0960a89010d Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Mon, 18 Mar 2024 20:09:08 +0300 Subject: [PATCH 133/274] [#196] Removed profile name from Boto3 client --- src/frostfs_testlib/s3/boto3_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index bdb177e..cb1ec28 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -46,7 +46,7 @@ class Boto3ClientWrapper(S3ClientWrapper): self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default" ) -> None: self.boto3_client: S3Client = None - self.session = boto3.Session(profile_name=profile) + self.session = boto3.Session() self.config = Config( 
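# --- illustrative aside (not part of the patch) -----------------------------
# Why dropping profile_name matters: boto3.Session(profile_name=...) requires
# a matching profile in ~/.aws and raises ProfileNotFound when it is absent,
# while a bare Session() simply uses the keys passed to session.client(...).
# A minimal sketch with an assumed endpoint and placeholder credentials:
import boto3
from botocore.config import Config as BotoConfig

session = boto3.Session()  # no profile lookup; credentials are supplied per client
client = session.client(
    "s3",
    aws_access_key_id="<access-key-id>",
    aws_secret_access_key="<secret-access-key>",
    endpoint_url="http://s3gate.example:8080",  # assumed endpoint
    config=BotoConfig(retries={"max_attempts": 3, "mode": "standard"}),
    verify=False,
)
# -----------------------------------------------------------------------------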
retries={ "max_attempts": MAX_REQUEST_ATTEMPTS, From 2dc5aa8a1eb5f2a6900af6836e068d512d2f93a0 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 19 Mar 2024 15:47:18 +0300 Subject: [PATCH 134/274] [#195] Update netmap parser and status check message Signed-off-by: Andrey Berezin --- src/frostfs_testlib/cli/netmap_parser.py | 11 +++--- .../controllers/cluster_state_controller.py | 35 +++++++++---------- .../dataclasses/storage_object_info.py | 4 +-- 3 files changed, 26 insertions(+), 24 deletions(-) diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py index 6d2eaaa..94d12b8 100644 --- a/src/frostfs_testlib/cli/netmap_parser.py +++ b/src/frostfs_testlib/cli/netmap_parser.py @@ -1,7 +1,7 @@ import re from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo, NodeStatus class NetmapParser: @@ -44,7 +44,7 @@ class NetmapParser: regexes = { "node_id": r"\d+: (?P\w+)", "node_data_ips": r"(?P/ip4/.+?)$", - "node_status": r"(?PONLINE|OFFLINE)", + "node_status": r"(?PONLINE|MAINTENANCE|OFFLINE)", "cluster_name": r"ClusterName: (?P\w+)", "continent": r"Continent: (?P\w+)", "country": r"Country: (?P\w+)", @@ -62,14 +62,17 @@ class NetmapParser: for node in netmap_nodes: for key, regex in regexes.items(): search_result = re.search(regex, node, flags=re.MULTILINE) + if search_result == None: + result_netmap[key] = None + continue if key == "node_data_ips": result_netmap[key] = search_result[key].strip().split(" ") continue if key == "external_address": result_netmap[key] = search_result[key].strip().split(",") continue - if search_result == None: - result_netmap[key] = None + if key == "node_status": + result_netmap[key] = NodeStatus(search_result[key].strip().lower()) continue result_netmap[key] = search_result[key].strip() diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 9e07914..03648f5 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -17,6 +17,7 @@ from frostfs_testlib.steps.network import IpHelper from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeStatus from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import parallel from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success @@ -413,41 +414,39 @@ class ClusterStateController: ) frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}") - @reporter.step("Set mode node to {status}") - def set_mode_node(self, cluster_node: ClusterNode, wallet: WalletInfo, status: str, await_tick: bool = True) -> None: + @reporter.step("Set node status to {status} in CSC") + def set_node_status(self, cluster_node: ClusterNode, wallet: WalletInfo, status: NodeStatus, await_tick: bool = True) -> None: rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint() control_endpoint = cluster_node.service(StorageNode).get_control_endpoint() - frostfs_adm, frostfs_cli, 
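# --- illustrative aside (not part of the patch) -----------------------------
# Two things the parser hunk above changes: the status regex now admits
# MAINTENANCE, and the None-guard runs before any key-specific handling, so a
# missing field can no longer be indexed. The status path restated standalone:
import re
from enum import Enum

class NodeStatus(Enum):
    MAINTENANCE = "maintenance"
    ONLINE = "online"
    OFFLINE = "offline"

sample = "1: k3yExample ONLINE /ip4/10.0.0.1"  # assumed snapshot fragment
match = re.search(r"(?P<node_status>ONLINE|MAINTENANCE|OFFLINE)", sample, flags=re.MULTILINE)
status = NodeStatus(match["node_status"].strip().lower()) if match else None
assert status is NodeStatus.ONLINE
# -----------------------------------------------------------------------------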
frostfs_cli_remote = self._get_cli(local_shell=self.shell, local_wallet=wallet, cluster_node=cluster_node) - node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint=rpc_endpoint).stdout) + frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(self.shell, wallet, cluster_node) + node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint).stdout) - with reporter.step("If status maintenance, then check that the option is enabled"): - if node_netinfo.maintenance_mode_allowed == "false": - frostfs_adm.morph.set_config(set_key_value="MaintenanceModeAllowed=true") + if node_netinfo.maintenance_mode_allowed == "false": + with reporter.step("Enable maintenance mode"): + frostfs_adm.morph.set_config("MaintenanceModeAllowed=true") - with reporter.step(f"Change the status to {status}"): - frostfs_cli_remote.control.set_status(endpoint=control_endpoint, status=status) + with reporter.step(f"Set node status to {status} using FrostfsCli"): + frostfs_cli_remote.control.set_status(control_endpoint, status.value) if not await_tick: return - with reporter.step("Tick 1 epoch, and await 2 block"): + with reporter.step("Tick 1 epoch and await 2 block"): frostfs_adm.morph.force_new_epoch() time.sleep(parse_time(MORPH_BLOCK_TIME) * 2) - self.check_node_status(status=status, wallet=wallet, cluster_node=cluster_node) + self.await_node_status(status, wallet, cluster_node) - @wait_for_success(80, 8, title="Wait for storage status become {status}") - def check_node_status(self, status: str, wallet: WalletInfo, cluster_node: ClusterNode): + @wait_for_success(80, 8, title="Wait for node status become {status}") + def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode): frostfs_cli = FrostfsCli(self.shell, FROSTFS_CLI_EXEC, wallet.config_path) - netmap = NetmapParser.snapshot_all_nodes( - frostfs_cli.netmap.snapshot(rpc_endpoint=cluster_node.storage_node.get_rpc_endpoint()).stdout - ) + netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(cluster_node.storage_node.get_rpc_endpoint()).stdout) netmap = [node for node in netmap if cluster_node.host_ip == node.node] - if status == "offline": + if status == NodeStatus.OFFLINE: assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline" else: - assert netmap[0].node_status == status.upper(), f"Node state - {netmap[0].node_status} != {status} expect" + assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'" def _get_cli( self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index f4d729d..28fdaa5 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -28,7 +28,7 @@ class StorageObjectInfo(ObjectRef): locks: Optional[list[LockObjectInfo]] = None -class ModeNode(HumanReadableEnum): +class NodeStatus(HumanReadableEnum): MAINTENANCE: str = "maintenance" ONLINE: str = "online" OFFLINE: str = "offline" @@ -37,7 +37,7 @@ class ModeNode(HumanReadableEnum): @dataclass class NodeNetmapInfo: node_id: str = None - node_status: ModeNode = None + node_status: NodeStatus = None node_data_ips: list[str] = None cluster_name: str = None continent: str = None From 653621fb7eebce0fb6a9cf558eab1365d52012e0 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 20 Mar 
2024 17:06:38 +0300 Subject: [PATCH 135/274] [#197] Allow config_dir for local scenario --- src/frostfs_testlib/load/load_config.py | 42 +++++++------------------ 1 file changed, 11 insertions(+), 31 deletions(-) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index b859971..2a546c4 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -177,13 +177,9 @@ class Preset: @dataclass class PrometheusParams: # Prometheus server URL - server_url: Optional[str] = metadata_field( - all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False - ) + server_url: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False) # Prometheus trend stats - trend_stats: Optional[str] = metadata_field( - all_load_scenarios, env_variable="K6_PROMETHEUS_RW_TREND_STATS", string_repr=False - ) + trend_stats: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_TREND_STATS", string_repr=False) # Additional tags metrics_tags: Optional[str] = metadata_field(all_load_scenarios, None, "METRIC_TAGS", False) @@ -246,9 +242,7 @@ class LoadParams: # ------- COMMON SCENARIO PARAMS ------- # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. - load_time: Optional[int] = metadata_field( - all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds - ) + load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds) # Object size in KB for load and preset. object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False) # For read operations, controls from which set get objects to read @@ -266,9 +260,7 @@ class LoadParams: # sleep for the remainder of the time until the specified minimum duration is reached. min_iteration_duration: Optional[str] = metadata_field(all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False) # Prepare/cut objects locally on client before sending - prepare_locally: Optional[bool] = metadata_field( - [LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False - ) + prepare_locally: Optional[bool] = metadata_field([LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False) # Specifies K6 setupTimeout time. Currently hardcoded in xk6 as 5 seconds for all scenarios # https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False) @@ -298,35 +290,25 @@ class LoadParams: delete_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True) # Amount of preAllocatedVUs for write operations. - preallocated_writers: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True - ) + preallocated_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True) # Amount of maxVUs for write operations. max_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True) # Amount of preAllocatedVUs for read operations. 
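# --- illustrative aside (not part of the patch) -----------------------------
# Every metadata_field(...) reflowed in this hunk rides on dataclasses field
# metadata. A deliberately simplified restatement of the pattern (helper
# signature reduced for illustration; not the testlib one):
from dataclasses import dataclass, field, fields
from typing import Optional

def metadata_field(env_variable: str, string_repr: bool = True):
    return field(default=None, metadata={"env_variable": env_variable, "string_repr": string_repr})

@dataclass
class MiniLoadParams:
    duration: Optional[int] = metadata_field("DURATION")
    write_rate: Optional[int] = metadata_field("WRITE_RATE")

def to_env(params: MiniLoadParams) -> dict[str, str]:
    # Collect k6 environment overrides from every populated field.
    return {
        f.metadata["env_variable"]: str(getattr(params, f.name))
        for f in fields(params)
        if getattr(params, f.name) is not None
    }

assert to_env(MiniLoadParams(duration=300)) == {"DURATION": "300"}
# -----------------------------------------------------------------------------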
- preallocated_readers: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True - ) + preallocated_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True) # Amount of maxVUs for read operations. max_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_READERS", False, True) # Amount of preAllocatedVUs for read operations. - preallocated_deleters: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True - ) + preallocated_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True) # Amount of maxVUs for delete operations. max_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True) # Multipart # Number of parts to upload in parallel - writers_multipart: Optional[int] = metadata_field( - [LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True - ) + writers_multipart: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True) # part size must be greater than (5 MB) - write_object_part_size: Optional[int] = metadata_field( - [LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False - ) + write_object_part_size: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False) # Period of time to apply the rate value. time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT", False) @@ -341,7 +323,7 @@ class LoadParams: # Config file location (filled automatically) config_file: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_FILE", False) # Config directory location (filled automatically) - config_dir: Optional[str] = metadata_field([LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False) + config_dir: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False) def set_id(self, load_id): self.load_id = load_id @@ -474,9 +456,7 @@ class LoadParams: static_params = [f"{load_type_str}"] dynamic_params = [ - f"{meta_field.name}={meta_field.value}" - for meta_field in self._get_applicable_fields() - if meta_field.metadata["string_repr"] + f"{meta_field.name}={meta_field.value}" for meta_field in self._get_applicable_fields() if meta_field.metadata["string_repr"] ] params = ", ".join(static_params + dynamic_params) From 076e444f84edc546f8960cb0e1f4191803f91a13 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 22 Mar 2024 12:19:53 +0300 Subject: [PATCH 136/274] [#198] Check only data disks for local safe-stopper Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/k6.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index caf3cfe..3e62a16 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -4,6 +4,7 @@ import math import os from dataclasses import dataclass from datetime import datetime +from threading import Event from time import sleep from typing import Any from urllib.parse import urlparse @@ -69,7 +70,7 @@ class K6: self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, remote_user, process_id) def _get_fill_percents(self): - fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs").stdout.split("\n") + fill_percents = 
self.shell.exec("df -H --output=source,pcent,target | grep frostfs | grep data").stdout.split("\n") return [line.split() for line in fill_percents][:-1] def check_fill_percent(self): @@ -149,7 +150,7 @@ class K6: with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"): self._k6_process.start() - def wait_until_finished(self, event, soft_timeout: int = 0) -> None: + def wait_until_finished(self, event: Event, soft_timeout: int = 0) -> None: with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"): if self.load_params.scenario == LoadScenario.VERIFY: timeout = self.load_params.verify_time or 0 From 9cfaf1a6187798900ca3069518b1f734a75afd3d Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 2 Apr 2024 19:40:18 +0300 Subject: [PATCH 137/274] [#201] Add more time for node return Signed-off-by: Andrey Berezin --- src/frostfs_testlib/steps/node_management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py index dd38279..ece674b 100644 --- a/src/frostfs_testlib/steps/node_management.py +++ b/src/frostfs_testlib/steps/node_management.py @@ -263,7 +263,7 @@ def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[ @reporter.step("Wait for node {node} is ready") def wait_for_node_to_be_ready(node: StorageNode) -> None: - timeout, attempts = 30, 6 + timeout, attempts = 60, 15 for _ in range(attempts): try: health_check = storage_node_healthcheck(node) From 338584069d8a99113e9e293cd4563ebb4ea45aa6 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 5 Mar 2024 12:51:15 +0300 Subject: [PATCH 138/274] [#190] Add PlacementPolicy dataclass Allow to parametrize tests with placement policy. Signed-off-by: Evgenii Stratonikov --- src/frostfs_testlib/storage/dataclasses/policy.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 src/frostfs_testlib/storage/dataclasses/policy.py diff --git a/src/frostfs_testlib/storage/dataclasses/policy.py b/src/frostfs_testlib/storage/dataclasses/policy.py new file mode 100644 index 0000000..872ee05 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/policy.py @@ -0,0 +1,13 @@ +from dataclasses import dataclass + + +@dataclass +class PlacementPolicy: + name: str + value: str + + def __str__(self) -> str: + return self.name + + def __repr__(self) -> str: + return self.__str__() From e2a170d66e827d1d41bd201ab93ba6abfba5920b Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 5 Mar 2024 12:52:19 +0300 Subject: [PATCH 139/274] [#190] Introduce default EC placement policy The default policy which is similar to REP 2, but uses EC instead. 
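For context on the rule added below: REP 2 stores two full copies of each object, while EC 3.1 splits it into three data chunks plus one parity chunk, surviving the loss of one chunk at roughly 1.33x storage overhead instead of 2x. A hedged sketch pairing both defaults with the PlacementPolicy dataclass from the previous patch (constants copied from the surrounding hunks):

from dataclasses import dataclass

@dataclass
class PlacementPolicy:
    name: str
    value: str

    def __str__(self) -> str:
        return self.name  # the short name doubles as the pytest parameter ID

DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
DEFAULT_EC_PLACEMENT_RULE = "EC 3.1"

policies = [
    PlacementPolicy("rep", DEFAULT_PLACEMENT_RULE),
    PlacementPolicy("ec", DEFAULT_EC_PLACEMENT_RULE),
]
assert str(policies[1]) == "ec"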
Signed-off-by: Evgenii Stratonikov --- src/frostfs_testlib/steps/cli/container.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index fc643e2..fa739a8 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -95,6 +95,7 @@ class StorageContainer: DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" +DEFAULT_EC_PLACEMENT_RULE = "EC 3.1" @reporter.step("Create Container") From 6629b9bbaa5dbfc87f48597639d725d9a669cb67 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 4 Apr 2024 11:55:27 +0300 Subject: [PATCH 140/274] [#202] .forgejo: Replace old DCO action Signed-off-by: Evgenii Stratonikov --- .forgejo/workflows/dco.yml | 21 +++++++++++++++++++++ .github/CODEOWNERS | 1 - .github/workflows/dco.yml | 21 --------------------- 3 files changed, 21 insertions(+), 22 deletions(-) create mode 100644 .forgejo/workflows/dco.yml delete mode 100644 .github/CODEOWNERS delete mode 100644 .github/workflows/dco.yml diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml new file mode 100644 index 0000000..9aa0d31 --- /dev/null +++ b/.forgejo/workflows/dco.yml @@ -0,0 +1,21 @@ +name: DCO action +on: [pull_request] + +jobs: + dco: + name: DCO + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Setup Go + uses: actions/setup-go@v3 + with: + go-version: '1.21' + + - name: Run commit format checker + uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3 + with: + from: 'origin/${{ github.event.pull_request.base.ref }}' diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 1422062..0000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ -* @aprasolova @vdomnich-yadro @dansingjulia @yadro-vavdeev @abereziny diff --git a/.github/workflows/dco.yml b/.github/workflows/dco.yml deleted file mode 100644 index 40ed8fc..0000000 --- a/.github/workflows/dco.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: DCO check - -on: - pull_request: - branches: - - master - -jobs: - commits_check_job: - runs-on: ubuntu-latest - name: Commits Check - steps: - - name: Get PR Commits - id: 'get-pr-commits' - uses: tim-actions/get-pr-commits@master - with: - token: ${{ secrets.GITHUB_TOKEN }} - - name: DCO Check - uses: tim-actions/dco@master - with: - commits: ${{ steps.get-pr-commits.outputs.commits }} From 863e74f16153ec95b3d639efde879355b12806ef Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 8 Apr 2024 14:26:50 +0300 Subject: [PATCH 141/274] [#204] Fix custom_registry for verify scenario Signed-off-by: Andrey Berezin --- .../storage/controllers/background_load_controller.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index e713f02..a8588ff 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -187,6 +187,7 @@ class BackgroundLoadController: read_from=self.load_params.read_from, registry_file=self.load_params.registry_file, verify_time=self.load_params.verify_time, + custom_registry=self.load_params.custom_registry, load_type=self.load_params.load_type, 
load_id=self.load_params.load_id, vu_init_time=0, @@ -196,6 +197,9 @@ class BackgroundLoadController: setup_timeout="1s", ) + if self.verification_params.custom_registry: + self.verification_params.registry_file = self.load_params.custom_registry + if self.verification_params.verify_time is None: raise RuntimeError("verify_time should not be none") From 65ec50391ef3227635a465aead08421e27cab298 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Thu, 23 Nov 2023 15:53:03 +0300 Subject: [PATCH 142/274] Interfaces for IAM in S3 client --- src/frostfs_testlib/s3/aws_cli_client.py | 584 ++++++++++++++++++++++- src/frostfs_testlib/s3/boto3_client.py | 315 +++++++++++- src/frostfs_testlib/s3/interfaces.py | 153 +++++- src/frostfs_testlib/utils/cli_utils.py | 4 +- 4 files changed, 1039 insertions(+), 17 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index e4f2bb2..470e7a3 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -29,13 +29,17 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Configure S3 client (aws cli)") def __init__( - self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default" + self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" ) -> None: self.s3gate_endpoint = s3gate_endpoint + self.access_key_id: str = access_key_id + self.secret_access_key: str = secret_access_key self.profile = profile self.local_shell = LocalShell() + self.region = region + self.iam_endpoint = None try: - _configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key) + _configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key, region) self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}") self.local_shell.exec( f"aws configure set retry_mode {RETRY_MODE} --profile {profile}", @@ -43,10 +47,14 @@ class AwsCliClient(S3ClientWrapper): except Exception as err: raise RuntimeError("Error while configuring AwsCliClient") from err - @reporter.step("Set endpoint S3 to {s3gate_endpoint}") + @reporter.step("Set S3 endpoint to {s3gate_endpoint}") def set_endpoint(self, s3gate_endpoint: str): self.s3gate_endpoint = s3gate_endpoint + @reporter.step("Set IAM endpoint to {iam_endpoint}") + def set_iam_endpoint(self, iam_endpoint: str): + self.iam_endpoint = iam_endpoint + @reporter.step("Create bucket S3") def create_bucket( self, @@ -565,12 +573,13 @@ class AwsCliClient(S3ClientWrapper): self.local_shell.exec(cmd) @reporter.step("Put object tagging") - def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: + def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = '') -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} " - f"--tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" + f"{version} --tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -586,10 +595,11 @@ class AwsCliClient(S3ClientWrapper): return response.get("TagSet") @reporter.step("Delete object tagging") - def delete_object_tagging(self, bucket: str, 
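# --- illustrative aside (not part of the patch) -----------------------------
# The tagging methods around here build the optional --version-id flag with
# the same conditional-fragment pattern; restated standalone (bucket and key
# values are assumed):
from typing import Optional

def tagging_cmd(bucket: str, key: str, version_id: Optional[str] = None) -> str:
    version = f" --version-id {version_id}" if version_id else ""
    return f"aws s3api delete-object-tagging --bucket {bucket} --key {key}{version}"

assert "--version-id" not in tagging_cmd("demo-bucket", "obj")
assert tagging_cmd("demo-bucket", "obj", "v1").endswith("--version-id v1")
# -----------------------------------------------------------------------------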
key: str) -> None: + def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " - f"--key {key} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -750,3 +760,563 @@ class AwsCliClient(S3ClientWrapper): json_output = json.loads(output[output.index("{") :]) return json_output + + # IAM METHODS # + # Some methods don't have checks because AWS is silent in some cases (delete, attach, etc.) + + @reporter.step("Adds the specified user to the specified group") + def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam add-user-to-group --user-name {user_name} --group-name {group_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Attaches the specified managed policy to the specified IAM group") + def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam attach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 10) + + return response + + + @reporter.step("Attaches the specified managed policy to the specified user") + def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam attach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 10) + + return response + + + @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") + def iam_create_access_key(self, user_name: Optional[str] = None) -> dict: + cmd = ( + f"aws {self.common_flags} iam create-access-key --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + if user_name: + cmd += f" --user-name {user_name}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + access_key_id = response["AccessKey"].get("AccessKeyId") + secret_access_key = response["AccessKey"].get("SecretAccessKey") + assert access_key_id, f"Expected AccessKeyId in response:\n{response}" + assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" + + return access_key_id, secret_access_key + + + @reporter.step("Creates a new group") + def iam_create_group(self, group_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam create-group --group-name {group_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Group"), f"Expected Group in response:\n{response}" + assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" + + return response + + + @reporter.step("Creates a new managed policy for 
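# --- illustrative aside (not part of the patch) -----------------------------
# Shape of the create-access-key payload that iam_create_access_key above
# unpacks; field names follow the AWS IAM CreateAccessKey API, the concrete
# values are AWS's documented examples:
response = {
    "AccessKey": {
        "UserName": "test-user",
        "AccessKeyId": "AKIAIOSFODNN7EXAMPLE",
        "SecretAccessKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
    }
}
access_key_id = response["AccessKey"]["AccessKeyId"]
secret_access_key = response["AccessKey"]["SecretAccessKey"]
assert access_key_id and secret_access_key
# -----------------------------------------------------------------------------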
your AWS account") + def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: + cmd = ( + f"aws {self.common_flags} iam create-policy --endpoint {self.iam_endpoint}" + f" --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Policy"), f"Expected Policy in response:\n{response}" + assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" + + return response + + + @reporter.step("Creates a new IAM user for your AWS account") + def iam_create_user(self, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam create-user --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("User"), f"Expected User in response:\n{response}" + assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" + + return response + + + @reporter.step("Deletes the access key pair associated with the specified IAM user") + def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam delete-access-key --access-key-id {access_key_id} --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Deletes the specified IAM group") + def iam_delete_group(self, group_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam delete-group --group-name {group_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") + def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam delete-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Deletes the specified managed policy") + def iam_delete_policy(self, policy_arn: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam delete-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Deletes the specified IAM user") + def iam_delete_user(self, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam delete-user --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + + return response + + + @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") + def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam 
delete-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
+        )
+        if self.profile:
+            cmd += f" --profile {self.profile}"
+        output = self.local_shell.exec(cmd).stdout
+        response = self._to_json(output)
+
+        return response
+
+
+    @reporter.step("Removes the specified managed policy from the specified IAM group")
+    def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
+        cmd = (
+            f"aws {self.common_flags} iam detach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        )
+        if self.profile:
+            cmd += f" --profile {self.profile}"
+        output = self.local_shell.exec(cmd).stdout
+        response = self._to_json(output)
+        sleep(S3_SYNC_WAIT_TIME * 10)
+
+        return response
+
+
+    @reporter.step("Removes the specified managed policy from the specified user")
+    def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
+        cmd = (
+            f"aws {self.common_flags} iam detach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        )
+        if self.profile:
+            cmd += f" --profile {self.profile}"
+        output = self.local_shell.exec(cmd).stdout
+        response = self._to_json(output)
+        sleep(S3_SYNC_WAIT_TIME * 10)
+
+        return response
+
+
+    @reporter.step("Returns a list of IAM users that are in the specified IAM group")
+    def iam_get_group(self, group_name: str) -> dict:
+        cmd = (
+            f"aws {self.common_flags} iam get-group --group-name {group_name} --endpoint {self.iam_endpoint}"
+        )
+        if self.profile:
+            cmd += f" --profile {self.profile}"
+        output = self.local_shell.exec(cmd).stdout
+        response = self._to_json(output)
+
+        assert "Users" in response.keys(), f"Expected Users in response:\n{response}"
+        assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}"
+
+        return response
+
+
+    @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group")
+    def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
+        cmd = (
+            f"aws {self.common_flags} iam get-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
+        )
+        if self.profile:
+            cmd += f" --profile {self.profile}"
+        output = self.local_shell.exec(cmd).stdout
+        response = self._to_json(output)
+
+        return response
+
+
+    @reporter.step("Retrieves information about the specified managed policy")
+    def iam_get_policy(self, policy_arn: str) -> dict:
+        cmd = (
+            f"aws {self.common_flags} iam get-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        )
+        if self.profile:
+            cmd += f" --profile {self.profile}"
+        output = self.local_shell.exec(cmd).stdout
+        response = self._to_json(output)
+
+        assert response.get("Policy"), f"Expected Policy in response:\n{response}"
+        assert response["Policy"].get("Arn") == policy_arn, f"Arn should be equal to {policy_arn}"
+
+        return response
+
+
+    @reporter.step("Retrieves information about the specified version of the specified managed policy")
+    def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
+        cmd = (
+            f"aws {self.common_flags} iam get-policy-version --policy-arn {policy_arn} --version-id {version_id} --endpoint {self.iam_endpoint}"
+        )
+        if self.profile:
+            cmd += f" --profile {self.profile}"
+        output = self.local_shell.exec(cmd).stdout
+        response = self._to_json(output)
+
+        assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}"
+        assert
response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}" + + return response + + + @reporter.step("Retrieves information about the specified IAM user") + def iam_get_user(self, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam get-user --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("User"), f"Expected User in response:\n{response}" + assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" + + return response + + + @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") + def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam get-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("UserName"), f"Expected User in response:\n{response}" + + return response + + + @reporter.step("Returns information about the access key IDs associated with the specified IAM user") + def iam_list_access_keys(self, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-access-keys --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Lists all managed policies that are attached to the specified IAM group") + def iam_list_attached_group_policies(self, group_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-attached-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + + return response + + + @reporter.step("Lists all managed policies that are attached to the specified IAM user") + def iam_list_attached_user_policies(self, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-attached-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + + return response + + + @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") + def iam_list_entities_for_policy(self, policy_arn: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-entities-for-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" + assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" + + return response + + + @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM 
group") + def iam_list_group_policies(self, group_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + + return response + + + @reporter.step("Lists the IAM groups") + def iam_list_groups(self) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-groups --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Groups"), f"Expected Groups in response:\n{response}" + + return response + + + @reporter.step("Lists the IAM groups that the specified IAM user belongs to") + def iam_list_groups_for_user(self, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-groups-for-user --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Groups"), f"Expected Groups in response:\n{response}" + + return response + + + @reporter.step("Lists all the managed policies that are available in your AWS account") + def iam_list_policies(self) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-policies --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert 'Policies' in response.keys(), f"Expected Policies in response:\n{response}" + + return response + + + @reporter.step("Lists information about the versions of the specified managed policy") + def iam_list_policy_versions(self, policy_arn: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-policy-versions --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Versions"), f"Expected Versions in response:\n{response}" + + return response + + + @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") + def iam_list_user_policies(self, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + + return response + + + @reporter.step("Lists the IAM users") + def iam_list_users(self) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-users --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "Users" in response.keys(), f"Expected Users in response:\n{response}" + + return response + + + @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") + def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: + cmd = ( + f"aws {self.common_flags} iam put-group-policy 
--endpoint {self.iam_endpoint}" + f" --group-name {group_name} --policy-name {policy_name} --policy-document \'{json.dumps(policy_document)}\'" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 10) + + return response + + + @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") + def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: + cmd = ( + f"aws {self.common_flags} iam put-user-policy --endpoint {self.iam_endpoint}" + f" --user-name {user_name} --policy-name {policy_name} --policy-document \'{json.dumps(policy_document)}\'" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 10) + + return response + + + @reporter.step("Removes the specified user from the specified group") + def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam remove-user-from-group --endpoint {self.iam_endpoint}" + f" --group-name {group_name} --user-name {user_name}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Updates the name and/or the path of the specified IAM group") + def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: + cmd = ( + f"aws {self.common_flags} iam update-group --group-name {group_name} --endpoint {self.iam_endpoint}" + ) + if new_name: + cmd += f" --new-group-name {new_name}" + if new_path: + cmd += f" --new-path {new_path}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Updates the name and/or the path of the specified IAM user") + def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: + cmd = ( + f"aws {self.common_flags} iam update-user --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if new_name: + cmd += f" --new-user-name {new_name}" + if new_path: + cmd += f" --new-path {new_path}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index cb1ec28..46cfe4b 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -18,6 +18,9 @@ from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, R from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.utils.cli_utils import log_command_execution +# TODO: Refactor this code to use shell instead of _cmd_run +from frostfs_testlib.utils.cli_utils import _configure_aws_cli + logger = logging.getLogger("NeoLogger") # Disable warnings on self-signed certificate which the @@ -43,10 +46,11 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Configure S3 client (boto3)") @report_error def __init__( - self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default" + self, access_key_id: str, secret_access_key: 
str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" ) -> None: self.boto3_client: S3Client = None self.session = boto3.Session() + self.region = region self.config = Config( retries={ "max_attempts": MAX_REQUEST_ATTEMPTS, @@ -56,6 +60,7 @@ class Boto3ClientWrapper(S3ClientWrapper): self.access_key_id: str = access_key_id self.secret_access_key: str = secret_access_key self.s3gate_endpoint: str = "" + self.boto3_iam_client: S3Client = None self.set_endpoint(s3gate_endpoint) @reporter.step("Set endpoint S3 to {s3gate_endpoint}") @@ -69,11 +74,23 @@ class Boto3ClientWrapper(S3ClientWrapper): service_name="s3", aws_access_key_id=self.access_key_id, aws_secret_access_key=self.secret_access_key, + region_name=self.region, config=self.config, endpoint_url=s3gate_endpoint, verify=False, ) + + @reporter.step("Set endpoint IAM to {iam_endpoint}") + def set_iam_endpoint(self, iam_endpoint: str): + self.boto3_iam_client = self.session.client( + service_name="iam", + aws_access_key_id=self.access_key_id, + aws_secret_access_key=self.secret_access_key, + endpoint_url=iam_endpoint, + verify=False,) + + def _to_s3_param(self, param: str): replacement_map = { "Acl": "ACL", @@ -118,7 +135,7 @@ class Boto3ClientWrapper(S3ClientWrapper): s3_bucket = self.boto3_client.create_bucket(**params) log_command_execution(f"Created S3 bucket {bucket}", s3_bucket) - sleep(S3_SYNC_WAIT_TIME) + sleep(S3_SYNC_WAIT_TIME * 10) return bucket @reporter.step("List buckets S3") @@ -139,7 +156,7 @@ class Boto3ClientWrapper(S3ClientWrapper): def delete_bucket(self, bucket: str) -> None: response = self.boto3_client.delete_bucket(Bucket=bucket) log_command_execution("S3 Delete bucket result", response) - sleep(S3_SYNC_WAIT_TIME) + sleep(S3_SYNC_WAIT_TIME * 10) @reporter.step("Head bucket S3") @report_error @@ -355,7 +372,7 @@ class Boto3ClientWrapper(S3ClientWrapper): } response = self.boto3_client.delete_object(**params) log_command_execution("S3 Delete object result", response) - sleep(S3_SYNC_WAIT_TIME) + sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Delete objects S3") @@ -366,7 +383,7 @@ class Boto3ClientWrapper(S3ClientWrapper): assert ( "Errors" not in response ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' - sleep(S3_SYNC_WAIT_TIME) + sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Delete object versions S3") @@ -592,10 +609,10 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Put object tagging") @report_error - def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: + def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = '') -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} - response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging) + response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging, VersionId=version_id) log_command_execution("S3 Put object tagging", response) @reporter.step("Get object tagging") @@ -654,3 +671,287 @@ class Boto3ClientWrapper(S3ClientWrapper): raise NotImplementedError("Cp is not supported for boto3 client") # END OBJECT METHODS # + + + # IAM METHODS # + # Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.) 
+ + @reporter.step("Adds the specified user to the specified group") + def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: + response = self.boto3_iam_client.add_user_to_group(UserName=user_name, GroupName=group_name) + return response + + + @reporter.step("Attaches the specified managed policy to the specified IAM group") + def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: + response = self.boto3_iam_client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn) + sleep(S3_SYNC_WAIT_TIME * 10) + return response + + + @reporter.step("Attaches the specified managed policy to the specified user") + def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: + response = self.boto3_iam_client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn) + sleep(S3_SYNC_WAIT_TIME * 10) + return response + + + @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") + def iam_create_access_key(self, user_name: str) -> dict: + response = self.boto3_iam_client.create_access_key(UserName=user_name) + + access_key_id = response["AccessKey"].get("AccessKeyId") + secret_access_key = response["AccessKey"].get("SecretAccessKey") + assert access_key_id, f"Expected AccessKeyId in response:\n{response}" + assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" + + return access_key_id, secret_access_key + + + @reporter.step("Creates a new group") + def iam_create_group(self, group_name: str) -> dict: + response = self.boto3_iam_client.create_group(GroupName=group_name) + assert response.get("Group"), f"Expected Group in response:\n{response}" + assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" + + return response + + + @reporter.step("Creates a new managed policy for your AWS account") + def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: + response = self.boto3_iam_client.create_policy(PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)) + assert response.get("Policy"), f"Expected Policy in response:\n{response}" + assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" + + return response + + + @reporter.step("Creates a new IAM user for your AWS account") + def iam_create_user(self, user_name: str) -> dict: + response = self.boto3_iam_client.create_user(UserName=user_name) + assert response.get("User"), f"Expected User in response:\n{response}" + assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" + + return response + + + @reporter.step("Deletes the access key pair associated with the specified IAM user") + def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: + response = self.boto3_iam_client.delete_access_key(AccessKeyId=access_key_id, UserName=user_name) + return response + + + @reporter.step("Deletes the specified IAM group") + def iam_delete_group(self, group_name: str) -> dict: + response = self.boto3_iam_client.delete_group(GroupName=group_name) + return response + + + @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") + def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: + response = self.boto3_iam_client.delete_group_policy(GroupName=group_name, PolicyName=policy_name) + return response + + + @reporter.step("Deletes the specified managed policy") + def iam_delete_policy(self, 
policy_arn: str) -> dict: + response = self.boto3_iam_client.delete_policy(PolicyArn=policy_arn) + return response + + + @reporter.step("Deletes the specified IAM user") + def iam_delete_user(self, user_name: str) -> dict: + response = self.boto3_iam_client.delete_user(UserName=user_name) + return response + + + @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") + def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: + response = self.boto3_iam_client.delete_user_policy(UserName=user_name, PolicyName=policy_name) + return response + + + @reporter.step("Removes the specified managed policy from the specified IAM group") + def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: + response = self.boto3_iam_client.detach_group_policy(GroupName=group_name, PolicyArn=policy_arn) + sleep(S3_SYNC_WAIT_TIME * 10) + return response + + + @reporter.step("Removes the specified managed policy from the specified user") + def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: + response = self.boto3_iam_client.detach_user_policy(UserName=user_name, PolicyArn=policy_arn) + sleep(S3_SYNC_WAIT_TIME * 10) + return response + + + @reporter.step("Returns a list of IAM users that are in the specified IAM group") + def iam_get_group(self, group_name: str) -> dict: + response = self.boto3_iam_client.get_group(GroupName=group_name) + assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}" + + return response + + + @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") + def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: + response = self.boto3_iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name) + + return response + + + @reporter.step("Retrieves information about the specified managed policy") + def iam_get_policy(self, policy_arn: str) -> dict: + response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn) + assert response.get("Policy"), f"Expected Policy in response:\n{response}" + assert response["Policy"].get("Arn") == policy_arn, f"Arn should be equal to {policy_arn}" + + return response + + + @reporter.step("Retrieves information about the specified version of the specified managed policy") + def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: + response = self.boto3_iam_client.get_policy_version(PolicyArn=policy_arn, VersionId=version_id) + assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}" + assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}" + + return response + + + @reporter.step("Retrieves information about the specified IAM user") + def iam_get_user(self, user_name: str) -> dict: + response = self.boto3_iam_client.get_user(UserName=user_name) + assert response.get("User"), f"Expected User in response:\n{response}" + assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" + + return response + + + @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") + def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: + response = self.boto3_iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name) + assert response.get("UserName"), f"Expected UserName in response:\n{response}" + + return response
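+ # Minimal usage sketch of the wrappers above (all names are hypothetical; assumes set_iam_endpoint() was called and a managed policy ARN is at hand): + # client.iam_create_user("qa-user") + # access_key_id, secret_access_key = client.iam_create_access_key("qa-user")  # returns a tuple + # client.iam_attach_user_policy("qa-user", policy_arn)  # sleeps to let IAM changes propagate + # client.iam_detach_user_policy("qa-user", policy_arn) + # client.iam_delete_access_key(access_key_id, "qa-user") + # client.iam_delete_user("qa-user")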
+ + + @reporter.step("Returns information about the access key IDs associated with the specified IAM user") + def iam_list_access_keys(self, user_name: str) -> dict: + response = self.boto3_iam_client.list_access_keys(UserName=user_name) + + return response + + + @reporter.step("Lists all managed policies that are attached to the specified IAM group") + def iam_list_attached_group_policies(self, group_name: str) -> dict: + response = self.boto3_iam_client.list_attached_group_policies(GroupName=group_name) + assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + + return response + + + @reporter.step("Lists all managed policies that are attached to the specified IAM user") + def iam_list_attached_user_policies(self, user_name: str) -> dict: + response = self.boto3_iam_client.list_attached_user_policies(UserName=user_name) + assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + + return response + + + @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") + def iam_list_entities_for_policy(self, policy_arn: str) -> dict: + response = self.boto3_iam_client.list_entities_for_policy(PolicyArn=policy_arn) + + assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" + assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" + + return response + + + @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") + def iam_list_group_policies(self, group_name: str) -> dict: + response = self.boto3_iam_client.list_group_policies(GroupName=group_name) + assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + + return response + + + @reporter.step("Lists the IAM groups") + def iam_list_groups(self) -> dict: + response = self.boto3_iam_client.list_groups() + assert response.get("Groups"), f"Expected Groups in response:\n{response}" + + return response + + + @reporter.step("Lists the IAM groups that the specified IAM user belongs to") + def iam_list_groups_for_user(self, user_name: str) -> dict: + response = self.boto3_iam_client.list_groups_for_user(UserName=user_name) + assert response.get("Groups"), f"Expected Groups in response:\n{response}" + + return response + + + @reporter.step("Lists all the managed policies that are available in your AWS account") + def iam_list_policies(self) -> dict: + response = self.boto3_iam_client.list_policies() + assert response.get("Policies"), f"Expected Policies in response:\n{response}" + + return response + + + @reporter.step("Lists information about the versions of the specified managed policy") + def iam_list_policy_versions(self, policy_arn: str) -> dict: + response = self.boto3_iam_client.list_policy_versions(PolicyArn=policy_arn) + assert response.get("Versions"), f"Expected Versions in response:\n{response}" + + return response + + + @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") + def iam_list_user_policies(self, user_name: str) -> dict: + response = self.boto3_iam_client.list_user_policies(UserName=user_name) + assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + + return response + + + @reporter.step("Lists the IAM users") + def iam_list_users(self) -> dict: + response = self.boto3_iam_client.list_users() + assert response.get("Users"), f"Expected Users in response:\n{response}" + + return response + + + 
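+ # Hedged example of a policy_document accepted by iam_create_policy and the put-policy methods below (standard IAM policy JSON; the bucket name is hypothetical): + # policy_document = { + #     "Version": "2012-10-17", + #     "Statement": [{"Effect": "Allow", "Action": "s3:*", "Resource": "arn:aws:s3:::test-bucket/*"}], + # }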
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") + def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: + response = self.boto3_iam_client.put_group_policy(GroupName=group_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)) + sleep(S3_SYNC_WAIT_TIME * 10) + return response + + + @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") + def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: + response = self.boto3_iam_client.put_user_policy(UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)) + sleep(S3_SYNC_WAIT_TIME * 10) + return response + + + @reporter.step("Removes the specified user from the specified group") + def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: + response = self.boto3_iam_client.remove_user_from_group(GroupName=group_name, UserName=user_name) + return response + + + @reporter.step("Updates the name and/or the path of the specified IAM group") + def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict: + response = self.boto3_iam_client.update_group(GroupName=group_name, NewGroupName=new_name, NewPath='/') + + return response + + + @reporter.step("Updates the name and/or the path of the specified IAM user") + def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict: + response = self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath='/') + return response \ No newline at end of file diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index b6a10e3..6c2a8e5 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -50,7 +50,7 @@ class BucketContainerResolver(ABC): class S3ClientWrapper(HumanReadableABC): @abstractmethod - def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str) -> None: + def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None: pass @abstractmethod @@ -395,3 +395,154 @@ class S3ClientWrapper(HumanReadableABC): """cp directory TODO: Add proper description""" # END OF OBJECT METHODS # + + + # IAM METHODS # + + @abstractmethod + def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: + '''Adds the specified user to the specified group''' + + @abstractmethod + def iam_attach_group_policy(self, group: str, policy_arn: str) -> dict: + '''Attaches the specified managed policy to the specified IAM group''' + + @abstractmethod + def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: + '''Attaches the specified managed policy to the specified user''' + + @abstractmethod + def iam_create_access_key(self, user_name: str) -> dict: + '''Creates a new AWS secret access key and access key ID for the specified user''' + + @abstractmethod + def iam_create_group(self, group_name: str) -> dict: + '''Creates a new group''' + + @abstractmethod + def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: + '''Creates a new managed policy for your AWS account''' + + @abstractmethod + def iam_create_user(self, user_name: str) -> dict: + '''Creates a new IAM user for your AWS account''' + + @abstractmethod + def iam_delete_access_key(self, access_key_id: str, 
user_name: str) -> dict: + '''Deletes the access key pair associated with the specified IAM user''' + + @abstractmethod + def iam_delete_group(self, group_name: str) -> dict: + '''Deletes the specified IAM group''' + + @abstractmethod + def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: + '''Deletes the specified inline policy that is embedded in the specified IAM group''' + + @abstractmethod + def iam_delete_policy(self, policy_arn: str) -> dict: + '''Deletes the specified managed policy''' + + @abstractmethod + def iam_delete_user(self, user_name: str) -> dict: + '''Deletes the specified IAM user''' + + @abstractmethod + def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: + '''Deletes the specified inline policy that is embedded in the specified IAM user''' + + @abstractmethod + def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: + '''Removes the specified managed policy from the specified IAM group''' + + @abstractmethod + def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: + '''Removes the specified managed policy from the specified user''' + + @abstractmethod + def iam_get_group(self, group_name: str) -> dict: + '''Returns a list of IAM users that are in the specified IAM group''' + + @abstractmethod + def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: + '''Retrieves the specified inline policy document that is embedded in the specified IAM group''' + + @abstractmethod + def iam_get_policy(self, policy_arn: str) -> dict: + '''Retrieves information about the specified managed policy''' + + @abstractmethod + def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: + '''Retrieves information about the specified version of the specified managed policy''' + + @abstractmethod + def iam_get_user(self, user_name: str) -> dict: + '''Retrieves information about the specified IAM user''' + + @abstractmethod + def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: + '''Retrieves the specified inline policy document that is embedded in the specified IAM user''' + + @abstractmethod + def iam_list_access_keys(self, user_name: str) -> dict: + '''Returns information about the access key IDs associated with the specified IAM user''' + + @abstractmethod + def iam_list_attached_group_policies(self, group_name: str) -> dict: + '''Lists all managed policies that are attached to the specified IAM group''' + + @abstractmethod + def iam_list_attached_user_policies(self, user_name: str) -> dict: + '''Lists all managed policies that are attached to the specified IAM user''' + + @abstractmethod + def iam_list_entities_for_policy(self, policy_arn: str) -> dict: + '''Lists all IAM users, groups, and roles that the specified managed policy is attached to''' + + @abstractmethod + def iam_list_group_policies(self, group_name: str) -> dict: + '''Lists the names of the inline policies that are embedded in the specified IAM group''' + + @abstractmethod + def iam_list_groups(self) -> dict: + '''Lists the IAM groups''' + + @abstractmethod + def iam_list_groups_for_user(self, user_name: str) -> dict: + '''Lists the IAM groups that the specified IAM user belongs to''' + + @abstractmethod + def iam_list_policies(self) -> dict: + '''Lists all the managed policies that are available in your AWS account''' + + @abstractmethod + def iam_list_policy_versions(self, policy_arn: str) -> dict: + '''Lists information about the versions of the specified managed policy''' + + 
@abstractmethod + def iam_list_user_policies(self, user_name: str) -> dict: + '''Lists the names of the inline policies embedded in the specified IAM user''' + + @abstractmethod + def iam_list_users(self) -> dict: + '''Lists the IAM users''' + + @abstractmethod + def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: + '''Adds or updates an inline policy document that is embedded in the specified IAM group''' + + @abstractmethod + def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: + '''Adds or updates an inline policy document that is embedded in the specified IAM user''' + + @abstractmethod + def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: + '''Removes the specified user from the specified group''' + + @abstractmethod + def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: + '''Updates the name and/or the path of the specified IAM group''' + + @abstractmethod + def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: + '''Updates the name and/or the path of the specified IAM user''' diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 41d52ab..0a1b5fd 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -41,7 +41,7 @@ def _run_with_passwd(cmd: str) -> str: return cmd.decode() -def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str = "json") -> str: +def _configure_aws_cli(cmd: str, key_id: str, access_key: str, region: str, out_format: str = "json") -> str: child = pexpect.spawn(cmd) child.delaybeforesend = 1 @@ -52,7 +52,7 @@ def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str = child.sendline(access_key) child.expect("Default region name.*") - child.sendline("") + child.sendline(region) child.expect("Default output format.*") child.sendline(out_format) From 82a8f9bab3e7938102c90bbe402fe2a586051fdb Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Thu, 11 Apr 2024 11:46:04 +0300 Subject: [PATCH 143/274] [#205] Propagate SETUP_TIMEOUT option Signed-off-by: a.berezin --- .../storage/controllers/background_load_controller.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index a8588ff..5628282 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -194,7 +194,7 @@ class BackgroundLoadController: working_dir=self.load_params.working_dir, endpoint_selection_strategy=self.load_params.endpoint_selection_strategy, k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy, - setup_timeout="1s", + setup_timeout=self.load_params.setup_timeout, ) if self.verification_params.custom_registry: From a85070e957e4d6b00dcdd838ccffd443e5fa8e9e Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 15 Apr 2024 12:35:33 +0300 Subject: [PATCH 144/274] [#206] Change epoch in func set status node, to 2 Signed-off-by: Dmitriy Zayakin --- .../storage/controllers/cluster_state_controller.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py
b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 03648f5..4003dfd 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -432,9 +432,10 @@ class ClusterStateController: if not await_tick: return - with reporter.step("Tick 1 epoch and await 2 block"): - frostfs_adm.morph.force_new_epoch() - time.sleep(parse_time(MORPH_BLOCK_TIME) * 2) + with reporter.step("Tick 2 epochs, awaiting 2 blocks after each"): + for _ in range(2): + frostfs_adm.morph.force_new_epoch() + time.sleep(parse_time(MORPH_BLOCK_TIME) * 2) self.await_node_status(status, wallet, cluster_node) From 70f03579602b941a660b66a1f0a6e4978d657062 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Mon, 15 Apr 2024 16:50:54 +0300 Subject: [PATCH 145/274] [#207] Fix shards for disabled write_cache Signed-off-by: a.berezin --- src/frostfs_testlib/storage/dataclasses/shard.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/storage/dataclasses/shard.py b/src/frostfs_testlib/storage/dataclasses/shard.py index 170a477..bebdbf5 100644 --- a/src/frostfs_testlib/storage/dataclasses/shard.py +++ b/src/frostfs_testlib/storage/dataclasses/shard.py @@ -56,9 +56,7 @@ class Shard: var_prefix = f"{SHARD_PREFIX}{shard_id}" blobstor_count = Shard._get_blobstor_count_from_section(config_object, shard_id) - blobstors = [ - Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count) - ] + blobstors = [Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count)] write_cache_enabled = config_object.as_bool(f"{var_prefix}_WRITECACHE_ENABLED") @@ -71,7 +69,13 @@ class Shard: @staticmethod def from_object(shard): metabase = shard["metabase"]["path"] if "path" in shard["metabase"] else shard["metabase"] + writecache_enabled = True + if "enabled" in shard["writecache"]: + writecache_enabled = shard["writecache"]["enabled"] + writecache = shard["writecache"]["path"] if "path" in shard["writecache"] else shard["writecache"] + if not writecache_enabled: + writecache = "" # Currently due to issue we need to check if pilorama exists in keys # TODO: make pilorama mandatory after fix From 541a3e0636e3bb4f9ab4d31a5aec3826f2382467 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Wed, 17 Apr 2024 11:03:47 +0300 Subject: [PATCH 146/274] [#208] Add await for search func Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/cli/object.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 5fe6054..cd58ec3 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -13,6 +13,7 @@ from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell import Shell from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.testing import wait_for_success from frostfs_testlib.utils import json_utils from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output @@ -695,6 +696,7 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict: } +@wait_for_success() @reporter.step("Search object nodes") def get_object_nodes( cluster: Cluster, From 80c65b454e08e9ee1957f25dd0ebefbe138218b4 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 8 Apr 2024 12:13:59
+0300 Subject: [PATCH 147/274] [#203] Remove hostnames cludges Signed-off-by: Andrey Berezin --- src/frostfs_testlib/steps/http/http_gate.py | 17 +++--------- src/frostfs_testlib/storage/cluster.py | 26 +++---------------- src/frostfs_testlib/storage/constants.py | 2 -- .../storage/dataclasses/frostfs_services.py | 9 ------- 4 files changed, 8 insertions(+), 46 deletions(-) diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py index 3f4d838..373283f 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -50,9 +50,7 @@ def get_via_http_gate( else: request = f"{node.http_gate.get_endpoint()}{request_path}" - resp = requests.get( - request, headers={"Host": node.storage_node.get_http_hostname()[0]}, stream=True, timeout=timeout, verify=False - ) + resp = requests.get(request, stream=True, timeout=timeout, verify=False) if not resp.ok: raise Exception( @@ -118,7 +116,6 @@ def get_via_http_gate_by_attribute( cid: CID to get object from attribute: attribute {name: attribute} value pair endpoint: http gate endpoint - http_hostname: http host name on the node request_path: (optional) http request path, if ommited - use default [{endpoint}/get_by_attribute/{Key}/{Value}] """ attr_name = list(attribute.keys())[0] @@ -129,9 +126,7 @@ def get_via_http_gate_by_attribute( else: request = f"{node.http_gate.get_endpoint()}{request_path}" - resp = requests.get( - request, stream=True, timeout=timeout, verify=False, headers={"Host": node.storage_node.get_http_hostname()[0]} - ) + resp = requests.get(request, stream=True, timeout=timeout, verify=False) if not resp.ok: raise Exception( @@ -151,11 +146,8 @@ def get_via_http_gate_by_attribute( return file_path -# TODO: pass http_hostname as a header @reporter.step("Upload via HTTP Gate") -def upload_via_http_gate( - cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300 -) -> str: +def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300) -> str: """ This function upload given object through HTTP gate cid: CID to get object from @@ -198,7 +190,6 @@ def is_object_large(filepath: str) -> bool: return False -# TODO: pass http_hostname as a header @reporter.step("Upload via HTTP Gate using Curl") def upload_via_http_gate_curl( cid: str, @@ -259,7 +250,7 @@ def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> str: file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}") curl = GenericCli("curl", node.host) - curl(f'-k -H "Host: {node.storage_node.get_http_hostname()[0]}"', f"{request} > {file_path}", shell=local_shell) + curl(f"-k ", f"{request} > {file_path}", shell=local_shell) return file_path diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 23130cb..15827cf 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -141,30 +141,16 @@ class ClusterNode: return self.host.config.interfaces[interface.value] def get_data_interfaces(self) -> list[str]: - return [ - ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface - ] + return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface] def get_data_interface(self, search_interface: str) -> list[str]: - return [ - self.host.config.interfaces[interface] - for interface in 
self.host.config.interfaces.keys() - if search_interface == interface - ] + return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_interface == interface] def get_internal_interfaces(self) -> list[str]: - return [ - ip_address - for name_interface, ip_address in self.host.config.interfaces.items() - if "internal" in name_interface - ] + return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "internal" in name_interface] def get_internal_interface(self, search_internal: str) -> list[str]: - return [ - self.host.config.interfaces[interface] - for interface in self.host.config.interfaces.keys() - if search_internal == interface - ] + return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_internal == interface] class Cluster: @@ -175,8 +161,6 @@ class Cluster: default_rpc_endpoint: str default_s3_gate_endpoint: str default_http_gate_endpoint: str - default_http_hostname: str - default_s3_hostname: str def __init__(self, hosting: Hosting) -> None: self._hosting = hosting @@ -185,8 +169,6 @@ class Cluster: self.default_rpc_endpoint = self.services(StorageNode)[0].get_rpc_endpoint() self.default_s3_gate_endpoint = self.services(S3Gate)[0].get_endpoint() self.default_http_gate_endpoint = self.services(HTTPGate)[0].get_endpoint() - self.default_http_hostname = self.services(StorageNode)[0].get_http_hostname() - self.default_s3_hostname = self.services(StorageNode)[0].get_s3_hostname() @property def hosts(self) -> list[Host]: diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 3d75988..66bf5cc 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -16,5 +16,3 @@ class ConfigAttributes: ENDPOINT_PROMETHEUS = "endpoint_prometheus" CONTROL_ENDPOINT = "control_endpoint" UN_LOCODE = "un_locode" - HTTP_HOSTNAME = "http_hostname" - S3_HOSTNAME = "s3_hostname" diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 9e671d5..16efd72 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -154,15 +154,6 @@ class StorageNode(NodeBase): def get_data_directory(self) -> str: return self.host.get_data_directory(self.name) - def get_storage_config(self) -> str: - return self.host.get_storage_config(self.name) - - def get_http_hostname(self) -> list[str]: - return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME) - - def get_s3_hostname(self) -> list[str]: - return self._get_attribute(ConfigAttributes.S3_HOSTNAME) - def delete_blobovnicza(self): self.host.delete_blobovnicza(self.name) From c0e37c8138c59b898f2a457876e6a1ac0e2f2523 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Tue, 23 Apr 2024 22:37:54 +0300 Subject: [PATCH 148/274] [#210] Return response in complete_multipart_upload function --- src/frostfs_testlib/s3/aws_cli_client.py | 5 ++++- src/frostfs_testlib/s3/boto3_client.py | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 470e7a3..e9811a5 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -729,7 +729,10 @@ class AwsCliClient(S3ClientWrapper): f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} " 
f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) - self.local_shell.exec(cmd) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response @reporter.step("Put object lock configuration") def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 46cfe4b..f9b8b16 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -571,6 +571,8 @@ class Boto3ClientWrapper(S3ClientWrapper): ) log_command_execution("S3 Complete multipart upload", response) + return response + @reporter.step("Put object retention") @report_error def put_object_retention( From 5b715877b3257b91dd8a3f80a1fe1f61a50828a5 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Wed, 24 Apr 2024 14:58:30 +0300 Subject: [PATCH 149/274] [#214] Removed x10 wait in delete bucket function --- src/frostfs_testlib/s3/boto3_client.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index f9b8b16..9801dbd 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -135,7 +135,7 @@ class Boto3ClientWrapper(S3ClientWrapper): s3_bucket = self.boto3_client.create_bucket(**params) log_command_execution(f"Created S3 bucket {bucket}", s3_bucket) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME) return bucket @reporter.step("List buckets S3") @@ -156,7 +156,7 @@ class Boto3ClientWrapper(S3ClientWrapper): def delete_bucket(self, bucket: str) -> None: response = self.boto3_client.delete_bucket(Bucket=bucket) log_command_execution("S3 Delete bucket result", response) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME) @reporter.step("Head bucket S3") @report_error @@ -372,7 +372,7 @@ class Boto3ClientWrapper(S3ClientWrapper): } response = self.boto3_client.delete_object(**params) log_command_execution("S3 Delete object result", response) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME) return response @reporter.step("Delete objects S3") @@ -383,7 +383,7 @@ class Boto3ClientWrapper(S3ClientWrapper): assert ( "Errors" not in response ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME) return response @reporter.step("Delete object versions S3") From a32bd120f23496f22f3aece52549605099d3d7d3 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Fri, 3 May 2024 17:12:54 +0300 Subject: [PATCH 150/274] [#218] Add ns attribute for container create Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/frostfs_cli/container.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/frostfs_testlib/cli/frostfs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py index b5592e8..43c3ec6 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/container.py +++ b/src/frostfs_testlib/cli/frostfs_cli/container.py @@ -9,6 +9,8 @@ class FrostfsCliContainer(CliCommand): self, rpc_endpoint: str, wallet: Optional[str] = None, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, address: Optional[str] = None, attributes: Optional[dict] = None, basic_acl: Optional[str] = None, @@ -45,6 +47,8 @@ class FrostfsCliContainer(CliCommand): wallet: WIF (NEP-2) string or path to the wallet or binary key. 
xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). + nns_zone: Container nns zone attribute. + nns_name: Container nns name attribute. Returns: Command's result. From 0306c09bed17a5fae6fad9df9c72da11176c4dc5 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Thu, 25 Apr 2024 20:50:33 +0300 Subject: [PATCH 151/274] [#216] Add parameter max_total_size_gb --- src/frostfs_testlib/load/interfaces/summarized.py | 2 +- src/frostfs_testlib/load/load_config.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/load/interfaces/summarized.py b/src/frostfs_testlib/load/interfaces/summarized.py index 54947b4..4be33ef 100644 --- a/src/frostfs_testlib/load/interfaces/summarized.py +++ b/src/frostfs_testlib/load/interfaces/summarized.py @@ -86,7 +86,7 @@ class SummarizedStats: target.latencies.by_node[node_key] = operation.latency target.throughput += operation.throughput target.errors.threshold = load_params.error_threshold - target.total_bytes = operation.total_bytes + target.total_bytes += operation.total_bytes if operation.failed_iterations: target.errors.by_node[node_key] = operation.failed_iterations diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 2a546c4..e0625a9 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -233,6 +233,8 @@ class LoadParams: ) # Percentage of filling of all data disks on all nodes fill_percent: Optional[float] = None + # if specified, max payload size in GB of the storage engine. If the storage engine is already full, no new objects will be saved. + max_total_size_gb: Optional[float] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "MAX_TOTAL_SIZE_GB") # if set, the payload is generated on the fly and is not read into memory fully. 
streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False) # Output format From 3e64b523065828bcbb84ec9bede0959c03ffaec3 Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Mon, 13 May 2024 13:34:37 +0300 Subject: [PATCH 152/274] [#220] add container metrics --- src/frostfs_testlib/storage/cluster.py | 3 +++ .../storage/dataclasses/metrics.py | 22 +++++++++++++++++++ 2 files changed, 25 insertions(+) create mode 100644 src/frostfs_testlib/storage/dataclasses/metrics.py diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 15827cf..9fcc4c9 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -14,6 +14,7 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, Inner from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.service_registry import ServiceRegistry +from frostfs_testlib.storage.dataclasses.metrics import Metrics class ClusterNode: @@ -24,11 +25,13 @@ class ClusterNode: class_registry: ServiceRegistry id: int host: Host + metrics: Metrics def __init__(self, host: Host, id: int) -> None: self.host = host self.id = id self.class_registry = get_service_registry() + self.metrics = Metrics(host=self.host, metrics_endpoint=self.storage_node.get_metrics_endpoint()) @property def host_ip(self): diff --git a/src/frostfs_testlib/storage/dataclasses/metrics.py b/src/frostfs_testlib/storage/dataclasses/metrics.py new file mode 100644 index 0000000..49c59bc --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/metrics.py @@ -0,0 +1,22 @@ +from frostfs_testlib.hosting import Host +from frostfs_testlib.shell.interfaces import CommandResult + + +class Metrics: + def __init__(self, host: Host, metrics_endpoint: str) -> None: + self.storage = StorageMetrics(host, metrics_endpoint) + + + +class StorageMetrics: + """ + Class represents storage metrics in a cluster + """ + def __init__(self, host: Host, metrics_endpoint: str) -> None: + self.host = host + self.metrics_endpoint = metrics_endpoint + + def get_metric_container(self, metric: str, cid: str) -> CommandResult: + shell = self.host.get_shell() + result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {metric} |grep {cid}") + return result From 3fee7aa1976e243cc0d03efaf9780fcd4dc385ed Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 13 May 2024 16:01:35 +0300 Subject: [PATCH 153/274] [#221] Added new control command CLI --- .../cli/frostfs_cli/control.py | 155 +++++++++++++++++- 1 file changed, 153 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/control.py b/src/frostfs_testlib/cli/frostfs_cli/control.py index 2cddfdf..957bca9 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/control.py +++ b/src/frostfs_testlib/cli/frostfs_cli/control.py @@ -69,7 +69,7 @@ class FrostfsCliControl(CliCommand): wallet: Path to the wallet or binary key address: Address of wallet account endpoint: Remote node control address (as 'multiaddr' or ':') - objects: List of object addresses to be removed in string format + objects: List of object addresses to be removed in string format timeout: Timeout for an operation (default 15s) Returns: @@ -78,4 +78,155 @@ class FrostfsCliControl(CliCommand): return self._execute( "control drop-objects", **{param: value for param, value in locals().items() if param not in ["self"]}, - ) \ No newline at end of file 
+ ) + + def add_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + rule: Optional[list[str]] = None, + path: Optional[str] = None, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Add APE rule to the node's chain storage + + Args: + address: Address of wallet account + chain-id: Assign ID to the parsed chain + chain-id-hex: Flag to parse chain ID as hex + endpoint: Remote node control address (as 'multiaddr' or ':') + path: Path to encoded chain in JSON or binary format + rule: Rule statement + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "control add-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def get_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Get APE rule from the node's chain storage + + Args: + address: Address of wallet account + chain-id: Chain id + chain-id-hex: Flag to parse chain ID as hex + endpoint: Remote node control address (as 'multiaddr' or ':') + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "control get-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list_rules( + self, + endpoint: str, + target_name: str, + target_type: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """List APE rules from the node's chain storage + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "control list-rules", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list_targets( + self, + endpoint: str, + chain_name: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """List APE chain targets on the node + + Args: + address: Address of wallet account + chain-name: Chain name(ingress|s3) + endpoint: Remote node control address (as 'multiaddr' or ':') + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result.
+ """ + return self._execute( + "control list-targets", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def remove_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + all: Optional[bool] = None, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + address: Address of wallet account + all: Remove all chains + chain-id: Assign ID to the parsed chain + chain-id-hex: Flag to parse chain ID as hex + endpoint: Remote node control address (as 'multiaddr' or ':') + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "control remove-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) From b8ce75b299b9a748ac0c040a31710a53cfdb5b30 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Thu, 16 May 2024 12:47:46 +0300 Subject: [PATCH 154/274] [#224] Restore invalid_obj check Signed-off-by: a.berezin --- src/frostfs_testlib/load/load_verifiers.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py index cbf6f64..97b0ffa 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -57,6 +57,8 @@ class LoadVerifier: invalid_objects = verify_metrics.read.failed_iterations total_left_objects = load_metrics.write.success_iterations - delete_success + if invalid_objects > 0: + issues.append(f"There were {invalid_objects} verification fails (hash mismatch).") # Due to interruptions we may see total verified objects to be less than written on writers count if abs(total_left_objects - verified_objects) > writers: issues.append( From 37a1177a3c5e18bca7ad63849787aafe09d0bd37 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Thu, 16 May 2024 10:13:11 +0300 Subject: [PATCH 155/274] Added delete bucket policy method to s3 client --- src/frostfs_testlib/s3/aws_cli_client.py | 10 ++++++++++ src/frostfs_testlib/s3/boto3_client.py | 7 +++++++ src/frostfs_testlib/s3/interfaces.py | 4 ++++ 3 files changed, 21 insertions(+) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index e9811a5..69a097b 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -489,6 +489,16 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("Policy") + @reporter.step("Delete bucket policy") + def delete_bucket_policy(self, bucket: str) -> dict: + cmd = ( + f"aws {self.common_flags} s3api delete-bucket-policy --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + @reporter.step("Put bucket policy") def put_bucket_policy(self, bucket: str, policy: dict) -> None: # Leaving it as is was in test repo. 
Double dumps to escape resulting string diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 9801dbd..59da55a 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -246,6 +246,13 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 get_bucket_policy result", response) return response.get("Policy") + @reporter.step("Delete bucket policy") + @report_error + def delete_bucket_policy(self, bucket: str) -> dict: + response = self.boto3_client.delete_bucket_policy(Bucket=bucket) + log_command_execution("S3 delete_bucket_policy result", response) + return response + @reporter.step("Put bucket policy") @report_error def put_bucket_policy(self, bucket: str, policy: dict) -> None: diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index 6c2a8e5..8cfc2bb 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -152,6 +152,10 @@ class S3ClientWrapper(HumanReadableABC): def get_bucket_policy(self, bucket: str) -> str: """Returns the policy of a specified bucket.""" + @abstractmethod + def delete_bucket_policy(self, bucket: str) -> dict: + """Deletes the policy of a specified bucket.""" + @abstractmethod def put_bucket_policy(self, bucket: str, policy: dict) -> None: """Applies S3 bucket policy to an S3 bucket.""" From a563f089f605ba174a405e6f297735ce4da19077 Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Tue, 21 May 2024 09:16:35 +0300 Subject: [PATCH 156/274] [#228] metrics for object --- src/frostfs_testlib/storage/dataclasses/metrics.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/frostfs_testlib/storage/dataclasses/metrics.py b/src/frostfs_testlib/storage/dataclasses/metrics.py index 49c59bc..c79dcf8 100644 --- a/src/frostfs_testlib/storage/dataclasses/metrics.py +++ b/src/frostfs_testlib/storage/dataclasses/metrics.py @@ -20,3 +20,17 @@ class StorageMetrics: shell = self.host.get_shell() result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {metric} |grep {cid}") return result + + def get_metrics_search_by_greps(self, **greps) -> CommandResult: + """ + Get metrics, searching by: cid, metric_type, shard_id, etc.
+ Args: + greps: dict of grep command names and values + for example get_metrics_search_by_greps(command='container_objects_total', cid='123456') + Returns: + result of the metrics command + """ + shell = self.host.get_shell() + additional_greps = " |grep ".join([grep_command for grep_command in greps.values()]) + result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}") + return result From e7423938e95e3cc55129a9fc297d78292db5b40f Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 30 May 2024 09:12:21 +0300 Subject: [PATCH 157/274] [#232] Change provide methods --- src/frostfs_testlib/credentials/interfaces.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/credentials/interfaces.py b/src/frostfs_testlib/credentials/interfaces.py index c863da0..b2ae6f1 100644 --- a/src/frostfs_testlib/credentials/interfaces.py +++ b/src/frostfs_testlib/credentials/interfaces.py @@ -26,7 +26,7 @@ class S3CredentialsProvider(ABC): self.cluster = cluster @abstractmethod - def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials: + def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None, **kwargs) -> S3Credentials: raise NotImplementedError("Directly called abstract class?") @@ -35,7 +35,7 @@ class GrpcCredentialsProvider(ABC): self.cluster = cluster @abstractmethod - def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo: + def provide(self, user: User, cluster_node: ClusterNode, **kwargs) -> WalletInfo: raise NotImplementedError("Directly called abstract class?") From ea1b3481205d355f96e66816744e1a372c18c987 Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Fri, 31 May 2024 09:44:17 +0300 Subject: [PATCH 158/274] [#232] grpc metrics --- src/frostfs_testlib/cli/frostfs_cli/tree.py | 24 +++++++++++++ .../healthcheck/basic_healthcheck.py | 8 +++++ src/frostfs_testlib/healthcheck/interfaces.py | 4 +++ src/frostfs_testlib/steps/cli/tree.py | 35 +++++++++++++++++++ 4 files changed, 71 insertions(+) create mode 100644 src/frostfs_testlib/steps/cli/tree.py diff --git a/src/frostfs_testlib/cli/frostfs_cli/tree.py b/src/frostfs_testlib/cli/frostfs_cli/tree.py index af330fe..c75b526 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/tree.py +++ b/src/frostfs_testlib/cli/frostfs_cli/tree.py @@ -27,3 +27,27 @@ class FrostfsCliTree(CliCommand): "tree healthcheck", **{param: value for param, value in locals().items() if param not in ["self"]}, ) + + def list( + self, + cid: str, + rpc_endpoint: Optional[str] = None, + wallet: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Get Tree List + + Args: + cid: Container ID. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + timeout: Timeout for the operation (default 15s). + + Returns: + Command's result.
+ + """ + return self._execute( + "tree list", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py index 0443e28..fc7ba59 100644 --- a/src/frostfs_testlib/healthcheck/basic_healthcheck.py +++ b/src/frostfs_testlib/healthcheck/basic_healthcheck.py @@ -47,6 +47,14 @@ class BasicHealthcheck(Healthcheck): self._perform(cluster_node, checks) + @wait_for_success(900, 30, title="Wait for tree healthcheck on {cluster_node}") + def tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: + checks = { + self._tree_healthcheck: {}, + } + + self._perform(cluster_node, checks) + @wait_for_success(120, 5, title="Wait for service healthcheck on {cluster_node}") def services_healthcheck(self, cluster_node: ClusterNode): svcs_to_check = cluster_node.services diff --git a/src/frostfs_testlib/healthcheck/interfaces.py b/src/frostfs_testlib/healthcheck/interfaces.py index c665b8a..cf17852 100644 --- a/src/frostfs_testlib/healthcheck/interfaces.py +++ b/src/frostfs_testlib/healthcheck/interfaces.py @@ -19,3 +19,7 @@ class Healthcheck(ABC): @abstractmethod def services_healthcheck(self, cluster_node: ClusterNode): """Perform service status check on target cluster node""" + + @abstractmethod + def tree_healthcheck(self, cluster_node: ClusterNode): + """Perform tree healthcheck on target cluster node""" diff --git a/src/frostfs_testlib/steps/cli/tree.py b/src/frostfs_testlib/steps/cli/tree.py new file mode 100644 index 0000000..4b0dfb3 --- /dev/null +++ b/src/frostfs_testlib/steps/cli/tree.py @@ -0,0 +1,35 @@ +import logging +from typing import Optional + +from frostfs_testlib import reporter +from frostfs_testlib.cli import FrostfsCli +from frostfs_testlib.plugins import load_plugin +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC +from frostfs_testlib.shell import Shell +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo + +logger = logging.getLogger("NeoLogger") + + + +@reporter.step("Get Tree List") +def get_tree_list( + wallet: WalletInfo, + cid: str, + shell: Shell, + endpoint: str, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> None: + """ + A wrapper for `frostfs-cli tree list` call. + Args: + wallet (WalletInfo): wallet on whose behalf we request the tree list + cid (str): ID of the container whose trees are listed + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + timeout: Timeout for the operation. + This function doesn't return anything.
+ """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli.tree.list(cid=cid, rpc_endpoint=endpoint, timeout=timeout) From ec42b156ac8ea8c17e8a6f53aa5da7cdce986f5a Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 4 Jun 2024 12:46:32 +0300 Subject: [PATCH 159/274] [#236] Add EC logic this HEAD command CLI --- src/frostfs_testlib/steps/cli/object.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index cd58ec3..3e0806c 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -615,6 +615,11 @@ def head_object( fst_line_idx = result.stdout.find("\n") decoded = json.loads(result.stdout[fst_line_idx:]) + # if response + if "chunks" in decoded.keys(): + logger.info("decoding ec chunks") + return decoded["chunks"] + # If response is Complex Object header, it has `splitId` key if "splitId" in decoded.keys(): logger.info("decoding split header") From a3b78559a961b738554aa1afb4bf199ea42b582f Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 5 Jun 2024 13:11:08 +0300 Subject: [PATCH 160/274] [#238] Update S3 acl verify method Signed-off-by: a.berezin --- .../resources/error_patterns.py | 5 +- .../resources/s3_acl_grants.py | 9 ++++ src/frostfs_testlib/steps/s3/s3_helper.py | 46 +++++++++---------- 3 files changed, 32 insertions(+), 28 deletions(-) create mode 100644 src/frostfs_testlib/resources/s3_acl_grants.py diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index e2e4c48..e92b33d 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -23,6 +23,5 @@ INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow" INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier" INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier" -S3_MALFORMED_XML_REQUEST = ( - "The XML you provided was not well-formed or did not validate against our published schema." -) +S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs" +S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema." 
diff --git a/src/frostfs_testlib/resources/s3_acl_grants.py b/src/frostfs_testlib/resources/s3_acl_grants.py
new file mode 100644
index 0000000..37005e8
--- /dev/null
+++ b/src/frostfs_testlib/resources/s3_acl_grants.py
@@ -0,0 +1,9 @@
+ALL_USERS_GROUP_URI = "http://acs.amazonaws.com/groups/global/AllUsers"
+ALL_USERS_GROUP_WRITE_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "WRITE"}
+ALL_USERS_GROUP_READ_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "READ"}
+CANONICAL_USER_FULL_CONTROL_GRANT = {"Grantee": {"Type": "CanonicalUser"}, "Permission": "FULL_CONTROL"}
+
+# https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl
+PRIVATE_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT]
+PUBLIC_READ_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT, ALL_USERS_GROUP_READ_GRANT]
+PUBLIC_READ_WRITE_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT, ALL_USERS_GROUP_WRITE_GRANT, ALL_USERS_GROUP_READ_GRANT]
diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py
index baf362b..ab0cee3 100644
--- a/src/frostfs_testlib/steps/s3/s3_helper.py
+++ b/src/frostfs_testlib/steps/s3/s3_helper.py
@@ -120,32 +120,28 @@ def assert_object_lock_mode(
     ).days == retain_period, f"Expected retention period is {retain_period} days"
 
 
-def assert_s3_acl(acl_grants: list, permitted_users: str):
-    if permitted_users == "AllUsers":
-        grantees = {"AllUsers": 0, "CanonicalUser": 0}
-        for acl_grant in acl_grants:
-            if acl_grant.get("Grantee", {}).get("Type") == "Group":
-                uri = acl_grant.get("Grantee", {}).get("URI")
-                permission = acl_grant.get("Permission")
-                assert (uri, permission) == (
-                    "http://acs.amazonaws.com/groups/global/AllUsers",
-                    "FULL_CONTROL",
-                ), "All Groups should have FULL_CONTROL"
-                grantees["AllUsers"] += 1
-            if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
-                permission = acl_grant.get("Permission")
-                assert permission == "FULL_CONTROL", "Canonical User should have FULL_CONTROL"
-                grantees["CanonicalUser"] += 1
-        assert grantees["AllUsers"] >= 1, "All Users should have FULL_CONTROL"
-        assert grantees["CanonicalUser"] >= 1, "Canonical User should have FULL_CONTROL"
+def _format_grants_as_strings(grants: list[dict]) -> set[str]:
+    grantee_format = "{g_type}::{uri}:{permission}"
+    return set(
+        [
+            grantee_format.format(
+                g_type=grant.get("Grantee", {}).get("Type", ""),
+                uri=grant.get("Grantee", {}).get("URI", ""),
+                permission=grant.get("Permission", ""),
+            )
+            for grant in grants
+        ]
+    )
 
-    if permitted_users == "CanonicalUser":
-        for acl_grant in acl_grants:
-            if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
-                permission = acl_grant.get("Permission")
-                assert permission == "FULL_CONTROL", "Only CanonicalUser should have FULL_CONTROL"
-            else:
-                logger.error("FULL_CONTROL is given to All Users")
+
+@reporter.step("Verify ACL permissions")
+def verify_acl_permissions(actual_acl_grants: list[dict], expected_acl_grants: list[dict], strict: bool = True):
+    actual_grants = _format_grants_as_strings(actual_acl_grants)
+    expected_grants = _format_grants_as_strings(expected_acl_grants)
+
+    assert expected_grants <= actual_grants, "Permissions mismatch"
+    if strict:
+        assert expected_grants == actual_grants, "Extra permissions found, must not be there"
 
 
 @reporter.step("Delete bucket with all objects")

From 5d192524a00b0327ccda0dc3898fe7045dc2976d Mon Sep 17 00:00:00 2001
From: Dmitriy Zayakin
Date: Thu, 6 Jun 2024 15:10:36 +0300
Subject: [PATCH 161/274] [#243] New error
patterns --- src/frostfs_testlib/resources/error_patterns.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index e92b33d..5491a7a 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -25,3 +25,6 @@ INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier" S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs" S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema." + +RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied" +RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied" From 10821f4c494d7ac6e7d9e380bc655423fa149e16 Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Wed, 5 Jun 2024 15:13:22 +0300 Subject: [PATCH 162/274] [#239] write cache metrics --- src/frostfs_testlib/load/load_config.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index e0625a9..1128096 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -119,6 +119,8 @@ class NodesSelectionStrategy(Enum): ALL_EXCEPT_UNDER_TEST = "ALL_EXCEPT_UNDER_TEST" # Select ONE random node except under test (useful for failover). RANDOM_SINGLE_EXCEPT_UNDER_TEST = "RANDOM_SINGLE_EXCEPT_UNDER_TEST" + # Select node under test + NODE_UNDER_TEST = "NODE_UNDER_TEST" class EndpointSelectionStrategy(Enum): From bfd7f70b6cc51703dc506ad55c009de6d172bd8c Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Wed, 5 Jun 2024 16:38:55 +0300 Subject: [PATCH 163/274] [#241] Methods for tag IAM user --- src/frostfs_testlib/s3/aws_cli_client.py | 43 ++++++++++++++++++++++++ src/frostfs_testlib/s3/boto3_client.py | 19 +++++++++++ src/frostfs_testlib/s3/interfaces.py | 12 +++++++ 3 files changed, 74 insertions(+) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 69a097b..3bf335e 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -1332,4 +1332,47 @@ class AwsCliClient(S3ClientWrapper): return response + @reporter.step("Adds one or more tags to an IAM user") + def iam_tag_user(self, user_name: str, tags: list) -> dict: + tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + cmd = ( + f"aws {self.common_flags} iam tag-user --user-name {user_name} --tags '{json.dumps(tags_json)}' --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("List tags of IAM user") + def iam_list_user_tags(self, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-user-tags --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Removes the specified tags from the user") + def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: + tag_keys_joined = ' '.join(tag_keys) + cmd = ( + f"aws {self.common_flags} iam untag-user --user-name {user_name} --tag-keys 
{tag_keys_joined} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 59da55a..bed316b 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -963,4 +963,23 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Updates the name and/or the path of the specified IAM user") def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict: response = self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath='/') + return response + + + @reporter.step("Adds one or more tags to an IAM user") + def iam_tag_user(self, user_name: str, tags: list) -> dict: + tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + response = self.boto3_iam_client.tag_user(UserName=user_name, Tags=tags_json) + return response + + + @reporter.step("List tags of IAM user") + def iam_list_user_tags(self, user_name: str) -> dict: + response = self.boto3_iam_client.list_user_tags(UserName=user_name) + return response + + + @reporter.step("Removes the specified tags from the user") + def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: + response = self.boto3_iam_client.untag_user(UserName=user_name, TagKeys=tag_keys) return response \ No newline at end of file diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index 8cfc2bb..651be7a 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -550,3 +550,15 @@ class S3ClientWrapper(HumanReadableABC): @abstractmethod def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: '''Updates the name and/or the path of the specified IAM user''' + + @abstractmethod + def iam_tag_user(self, user_name: str, tags: list) -> dict: + '''Adds one or more tags to an IAM user''' + + @abstractmethod + def iam_list_user_tags(self, user_name: str) -> dict: + '''List tags of IAM user''' + + @abstractmethod + def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: + '''Removes the specified tags from the user''' \ No newline at end of file From 7a482152a8a067a10da645c8401ef46cfb55f363 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Fri, 7 Jun 2024 17:03:39 +0300 Subject: [PATCH 164/274] [#245] Update versions check Signed-off-by: a.berezin --- src/frostfs_testlib/utils/version_utils.py | 83 ++++++++-------------- 1 file changed, 29 insertions(+), 54 deletions(-) diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index f1b7e37..7fcc9de 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -1,5 +1,6 @@ import logging import re +from functools import lru_cache from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli @@ -36,78 +37,52 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]: return versions +@reporter.step("Collect binaries versions from host") def parallel_binary_verions(host: Host) -> dict[str, str]: versions_by_host = {} - binary_path_by_name = {} # Maps binary name to executable path - for service_config in host.config.services: - exec_path = service_config.attributes.get("exec_path") - requires_check = 
service_config.attributes.get("requires_version_check", "true") - if exec_path: - binary_path_by_name[service_config.name] = { - "exec_path": exec_path, - "check": requires_check.lower() == "true", + binary_path_by_name = { + **{ + svc.name[:-3]: { + "exec_path": svc.attributes.get("exec_path"), + "param": svc.attributes.get("custom_version_parameter", "--version"), } - for cli_config in host.config.clis: - requires_check = cli_config.attributes.get("requires_version_check", "true") - binary_path_by_name[cli_config.name] = { - "exec_path": cli_config.exec_path, - "check": requires_check.lower() == "true", - } + for svc in host.config.services + if svc.attributes.get("exec_path") and svc.attributes.get("requires_version_check", "true") == "true" + }, + **{ + cli.name: {"exec_path": cli.exec_path, "param": cli.attributes.get("custom_version_parameter", "--version")} + for cli in host.config.clis + if cli.attributes.get("requires_version_check", "true") == "true" + }, + } shell = host.get_shell() versions_at_host = {} for binary_name, binary in binary_path_by_name.items(): + binary_path = binary["exec_path"] try: - binary_path = binary["exec_path"] - result = shell.exec(f"{binary_path} --version") - versions_at_host[binary_name] = {"version": _parse_version(result.stdout), "check": binary["check"]} + result = shell.exec(f"{binary_path} {binary['param']}") + version = _parse_version(result.stdout) or _parse_version(result.stderr) or "Unknown" + versions_at_host[binary_name] = version except Exception as exc: logger.error(f"Cannot get version for {binary_path} because of\n{exc}") - versions_at_host[binary_name] = {"version": "Unknown", "check": binary["check"]} + versions_at_host[binary_name] = "Unknown" versions_by_host[host.config.address] = versions_at_host return versions_by_host -@reporter.step("Get remote binaries versions") -def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: - versions_by_host = {} - future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts) +@lru_cache +def get_remote_binaries_versions(hosting: Hosting) -> dict[str, dict[str, str]]: + versions_by_host: dict[str, dict[str, str]] = {} + + with reporter.step("Get remote binaries versions"): + future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts) + for future in future_binary_verions: versions_by_host.update(future.result()) - # Consolidate versions across all hosts - cheak_versions = {} - exсeptions = [] - exception = set() - previous_host = None - versions = {} - captured_version = None - for host, binary_versions in versions_by_host.items(): - for name, binary in binary_versions.items(): - version = binary["version"] - if not cheak_versions.get(f"{name[:-2]}", None): - captured_version = cheak_versions.get(f"{name[:-2]}", {}).get(host, {}).get(captured_version) - cheak_versions[f"{name[:-2]}"] = {host: {version: name}} - else: - captured_version = list(cheak_versions.get(f"{name[:-2]}", {}).get(previous_host).keys())[0] - cheak_versions[f"{name[:-2]}"].update({host: {version: name}}) - - if captured_version and captured_version != version: - exception.add(name[:-2]) - - versions[name] = {"version": version, "check": binary["check"]} - previous_host = host - logger.info( - "Remote binaries versions:\n" + "\n".join([f"{key} ver: {value['version']}" for key, value in versions.items()]) - ) - if exception: - for i in exception: - for host in versions_by_host.keys(): - for version, name in cheak_versions.get(i).get(host).items(): - 
exсeptions.append(f"Binary {name} has inconsistent version {version} on host {host}") - exсeptions.append("\n") - return versions, exсeptions + return versions_by_host def _parse_version(version_output: str) -> str: From cb31d41f15c2ff54dd2efe4d6985e365e5bfaffe Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Tue, 18 Jun 2024 13:37:07 +0300 Subject: [PATCH 165/274] [#247] Use TestFiles which automatically deletes itself Signed-off-by: a.berezin --- src/frostfs_testlib/s3/aws_cli_client.py | 211 +++++--------------- src/frostfs_testlib/s3/boto3_client.py | 126 +++--------- src/frostfs_testlib/s3/interfaces.py | 84 ++++---- src/frostfs_testlib/steps/cli/object.py | 17 +- src/frostfs_testlib/steps/http/http_gate.py | 31 ++- src/frostfs_testlib/utils/file_utils.py | 86 ++++++-- 6 files changed, 209 insertions(+), 346 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 3bf335e..f6488f5 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -14,6 +14,7 @@ from frostfs_testlib.shell.local_shell import LocalShell # TODO: Refactor this code to use shell instead of _cmd_run from frostfs_testlib.utils.cli_utils import _configure_aws_cli +from frostfs_testlib.utils.file_utils import TestFile logger = logging.getLogger("NeoLogger") command_options = CommandOptions(timeout=480) @@ -153,8 +154,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket acl") def get_bucket_acl(self, bucket: str) -> list: cmd = ( - f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -172,10 +172,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List objects S3") def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = ( - f"aws {self.common_flags} s3api list-objects --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) + cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -319,18 +316,18 @@ class AwsCliClient(S3ClientWrapper): version_id: Optional[str] = None, object_range: Optional[tuple[int, int]] = None, full_output: bool = False, - ) -> Union[dict, str]: - file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) + ) -> dict | TestFile: + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} " - f"{version} {file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + f"{version} {test_file} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) if object_range: cmd += f" --range bytes={object_range[0]}-{object_range[1]}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - return response if full_output else file_path + return response if full_output else test_file @reporter.step("Get object ACL") def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: @@ -583,7 +580,7 @@ class AwsCliClient(S3ClientWrapper): 
self.local_shell.exec(cmd) @reporter.step("Put object tagging") - def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = '') -> None: + def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} version = f" --version-id {version_id}" if version_id else "" @@ -622,8 +619,7 @@ class AwsCliClient(S3ClientWrapper): metadata: Optional[dict] = None, ) -> dict: cmd = ( - f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) if metadata: cmd += " --metadata" @@ -779,9 +775,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Adds the specified user to the specified group") def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam add-user-to-group --user-name {user_name} --group-name {group_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam add-user-to-group --user-name {user_name} --group-name {group_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -789,12 +783,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Attaches the specified managed policy to the specified IAM group") def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam attach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam attach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -803,12 +794,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Attaches the specified managed policy to the specified user") def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam attach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam attach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -817,12 +805,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") def iam_create_access_key(self, user_name: Optional[str] = None) -> dict: - cmd = ( - f"aws {self.common_flags} iam create-access-key --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam create-access-key --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" if user_name: @@ -837,12 +822,9 @@ class AwsCliClient(S3ClientWrapper): return access_key_id, secret_access_key - @reporter.step("Creates a new group") def iam_create_group(self, group_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam create-group --group-name {group_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam create-group --group-name {group_name} --endpoint 
{self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -853,7 +835,6 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Creates a new managed policy for your AWS account") def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: cmd = ( @@ -871,12 +852,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Creates a new IAM user for your AWS account") def iam_create_user(self, user_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam create-user --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam create-user --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -887,12 +865,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Deletes the access key pair associated with the specified IAM user") def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam delete-access-key --access-key-id {access_key_id} --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam delete-access-key --access-key-id {access_key_id} --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" @@ -901,12 +876,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Deletes the specified IAM group") def iam_delete_group(self, group_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam delete-group --group-name {group_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam delete-group --group-name {group_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -914,12 +886,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam delete-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam delete-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -927,12 +896,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Deletes the specified managed policy") def iam_delete_policy(self, policy_arn: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam delete-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam delete-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -940,26 +906,19 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Deletes the specified IAM user") def iam_delete_user(self, user_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam delete-user --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam delete-user --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = 
self.local_shell.exec(cmd).stdout response = self._to_json(output) - return response - @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam delete-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam delete-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -967,12 +926,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Removes the specified managed policy from the specified IAM group") def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam detach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam detach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -981,12 +937,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Removes the specified managed policy from the specified user") def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam detach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam detach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -995,12 +948,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Returns a list of IAM users that are in the specified IAM group") def iam_get_group(self, group_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam get-group --group-name {group_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam get-group --group-name {group_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1011,12 +961,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam get-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam get-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1024,12 +971,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Retrieves information about the specified managed policy") def iam_get_policy(self, policy_arn: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam get-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam get-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = 
self.local_shell.exec(cmd).stdout @@ -1040,12 +984,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Retrieves information about the specified version of the specified managed policy") def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam get-policy-version --policy-arn {policy_arn} --version-id {version_id} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam get-policy-version --policy-arn {policy_arn} --version-id {version_id} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1056,12 +997,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Retrieves information about the specified IAM user") def iam_get_user(self, user_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam get-user --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam get-user --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1072,12 +1010,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam get-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam get-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1087,12 +1022,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Returns information about the access key IDs associated with the specified IAM user") def iam_list_access_keys(self, user_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-access-keys --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-access-keys --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1100,12 +1032,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists all managed policies that are attached to the specified IAM group") def iam_list_attached_group_policies(self, group_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-attached-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-attached-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1115,12 +1044,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists all managed policies that are attached to the specified IAM user") def iam_list_attached_user_policies(self, user_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-attached-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-attached-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ 
-1130,12 +1056,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") def iam_list_entities_for_policy(self, policy_arn: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-entities-for-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-entities-for-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1146,12 +1069,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") def iam_list_group_policies(self, group_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1161,12 +1081,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists the IAM groups") def iam_list_groups(self) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-groups --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-groups --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1176,12 +1093,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists the IAM groups that the specified IAM user belongs to") def iam_list_groups_for_user(self, user_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-groups-for-user --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-groups-for-user --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1191,27 +1105,21 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists all the managed policies that are available in your AWS account") def iam_list_policies(self) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-policies --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-policies --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert 'Policies' in response.keys(), f"Expected Policies in response:\n{response}" + assert "Policies" in response.keys(), f"Expected Policies in response:\n{response}" return response - @reporter.step("Lists information about the versions of the specified managed policy") def iam_list_policy_versions(self, policy_arn: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-policy-versions --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-policy-versions --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1221,12 +1129,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") def iam_list_user_policies(self, user_name: str) -> dict: - 
cmd = ( - f"aws {self.common_flags} iam list-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1236,12 +1141,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists the IAM users") def iam_list_users(self) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-users --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-users --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1251,12 +1153,11 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: cmd = ( f"aws {self.common_flags} iam put-group-policy --endpoint {self.iam_endpoint}" - f" --group-name {group_name} --policy-name {policy_name} --policy-document \'{json.dumps(policy_document)}\'" + f" --group-name {group_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" ) if self.profile: cmd += f" --profile {self.profile}" @@ -1266,12 +1167,11 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: cmd = ( f"aws {self.common_flags} iam put-user-policy --endpoint {self.iam_endpoint}" - f" --user-name {user_name} --policy-name {policy_name} --policy-document \'{json.dumps(policy_document)}\'" + f" --user-name {user_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" ) if self.profile: cmd += f" --profile {self.profile}" @@ -1282,7 +1182,6 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Removes the specified user from the specified group") def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: cmd = ( @@ -1296,12 +1195,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Updates the name and/or the path of the specified IAM group") def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: - cmd = ( - f"aws {self.common_flags} iam update-group --group-name {group_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam update-group --group-name {group_name} --endpoint {self.iam_endpoint}" if new_name: cmd += f" --new-group-name {new_name}" if new_path: @@ -1314,12 +1210,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Updates the name and/or the path of the specified IAM user") def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: - cmd = ( - f"aws {self.common_flags} iam update-user --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam update-user --user-name {user_name} --endpoint {self.iam_endpoint}" if new_name: cmd += f" --new-user-name {new_name}" if new_path: @@ -1346,12 +1239,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("List tags of IAM user") def iam_list_user_tags(self, user_name: str) -> dict: - 
cmd = ( - f"aws {self.common_flags} iam list-user-tags --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-user-tags --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" @@ -1360,13 +1250,10 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Removes the specified tags from the user") def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: - tag_keys_joined = ' '.join(tag_keys) - cmd = ( - f"aws {self.common_flags} iam untag-user --user-name {user_name} --tag-keys {tag_keys_joined} --endpoint {self.iam_endpoint}" - ) + tag_keys_joined = " ".join(tag_keys) + cmd = f"aws {self.common_flags} iam untag-user --user-name {user_name} --tag-keys {tag_keys_joined} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" @@ -1374,5 +1261,3 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response - - diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index bed316b..bdf7a9f 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -16,10 +16,10 @@ from mypy_boto3_s3 import S3Client from frostfs_testlib import reporter from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict -from frostfs_testlib.utils.cli_utils import log_command_execution # TODO: Refactor this code to use shell instead of _cmd_run -from frostfs_testlib.utils.cli_utils import _configure_aws_cli +from frostfs_testlib.utils.cli_utils import _configure_aws_cli, log_command_execution +from frostfs_testlib.utils.file_utils import TestFile logger = logging.getLogger("NeoLogger") @@ -80,7 +80,6 @@ class Boto3ClientWrapper(S3ClientWrapper): verify=False, ) - @reporter.step("Set endpoint IAM to {iam_endpoint}") def set_iam_endpoint(self, iam_endpoint: str): self.boto3_iam_client = self.session.client( @@ -88,8 +87,8 @@ class Boto3ClientWrapper(S3ClientWrapper): aws_access_key_id=self.access_key_id, aws_secret_access_key=self.secret_access_key, endpoint_url=iam_endpoint, - verify=False,) - + verify=False, + ) def _to_s3_param(self, param: str): replacement_map = { @@ -167,9 +166,7 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Put bucket versioning status") @report_error def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: - response = self.boto3_client.put_bucket_versioning( - Bucket=bucket, VersioningConfiguration={"Status": status.value} - ) + response = self.boto3_client.put_bucket_versioning(Bucket=bucket, VersioningConfiguration={"Status": status.value}) log_command_execution("S3 Set bucket versioning to", response) @reporter.step("Get bucket versioning status") @@ -217,11 +214,7 @@ class Boto3ClientWrapper(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> None: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } + params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} response = self.boto3_client.put_bucket_acl(**params) log_command_execution("S3 ACL bucket result", response) @@ -360,11 +353,7 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Head object S3") 
@report_error def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } + params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} response = self.boto3_client.head_object(**params) log_command_execution("S3 Head object result", response) return response @@ -372,11 +361,7 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Delete object S3") @report_error def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } + params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} response = self.boto3_client.delete_object(**params) log_command_execution("S3 Delete object result", response) sleep(S3_SYNC_WAIT_TIME) @@ -415,9 +400,7 @@ class Boto3ClientWrapper(S3ClientWrapper): def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: # Delete objects without creating delete markers for object_version in object_versions: - response = self.boto3_client.delete_object( - Bucket=bucket, Key=object_version["Key"], VersionId=object_version["VersionId"] - ) + response = self.boto3_client.delete_object(Bucket=bucket, Key=object_version["Key"], VersionId=object_version["VersionId"]) log_command_execution("S3 Delete object result", response) @reporter.step("Put object ACL") @@ -436,11 +419,7 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Get object ACL") @report_error def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } + params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} response = self.boto3_client.get_object_acl(**params) log_command_execution("S3 ACL objects result", response) return response.get("Grants") @@ -483,8 +462,7 @@ class Boto3ClientWrapper(S3ClientWrapper): version_id: Optional[str] = None, object_range: Optional[tuple[int, int]] = None, full_output: bool = False, - ) -> Union[dict, str]: - filename = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) + ) -> dict | TestFile: range_str = None if object_range: range_str = f"bytes={object_range[0]}-{object_range[1]}" @@ -497,12 +475,16 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.get_object(**params) log_command_execution("S3 Get objects result", response) - with open(f"{filename}", "wb") as get_file: + if full_output: + return response + + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) + with open(test_file, "wb") as file: chunk = response["Body"].read(1024) while chunk: - get_file.write(chunk) + file.write(chunk) chunk = response["Body"].read(1024) - return response if full_output else filename + return test_file @reporter.step("Create multipart upload S3") @report_error @@ -573,9 +555,7 @@ class Boto3ClientWrapper(S3ClientWrapper): @report_error def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag 
in parts] - response = self.boto3_client.complete_multipart_upload( - Bucket=bucket, Key=key, UploadId=upload_id, MultipartUpload={"Parts": parts} - ) + response = self.boto3_client.complete_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id, MultipartUpload={"Parts": parts}) log_command_execution("S3 Complete multipart upload", response) return response @@ -590,11 +570,7 @@ class Boto3ClientWrapper(S3ClientWrapper): version_id: Optional[str] = None, bypass_governance_retention: Optional[bool] = None, ) -> None: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } + params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} response = self.boto3_client.put_object_retention(**params) log_command_execution("S3 Put object retention ", response) @@ -618,7 +594,7 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Put object tagging") @report_error - def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = '') -> None: + def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging, VersionId=version_id) @@ -627,11 +603,7 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Get object tagging") @report_error def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } + params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} response = self.boto3_client.get_object_tagging(**params) log_command_execution("S3 Get object tagging", response) return response.get("TagSet") @@ -681,7 +653,6 @@ class Boto3ClientWrapper(S3ClientWrapper): # END OBJECT METHODS # - # IAM METHODS # # Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.) 
@@ -690,21 +661,18 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_iam_client.add_user_to_group(UserName=user_name, GroupName=group_name) return response - @reporter.step("Attaches the specified managed policy to the specified IAM group") def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: response = self.boto3_iam_client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn) sleep(S3_SYNC_WAIT_TIME * 10) return response - @reporter.step("Attaches the specified managed policy to the specified user") def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: response = self.boto3_iam_client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn) sleep(S3_SYNC_WAIT_TIME * 10) return response - @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") def iam_create_access_key(self, user_name: str) -> dict: response = self.boto3_iam_client.create_access_key(UserName=user_name) @@ -716,7 +684,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return access_key_id, secret_access_key - @reporter.step("Creates a new group") def iam_create_group(self, group_name: str) -> dict: response = self.boto3_iam_client.create_group(GroupName=group_name) @@ -725,7 +692,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Creates a new managed policy for your AWS account") def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: response = self.boto3_iam_client.create_policy(PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)) @@ -734,7 +700,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Creates a new IAM user for your AWS account") def iam_create_user(self, user_name: str) -> dict: response = self.boto3_iam_client.create_user(UserName=user_name) @@ -743,57 +708,48 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Deletes the access key pair associated with the specified IAM user") def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: response = self.boto3_iam_client.delete_access_key(AccessKeyId=access_key_id, UserName=user_name) return response - @reporter.step("Deletes the specified IAM group") def iam_delete_group(self, group_name: str) -> dict: response = self.boto3_iam_client.delete_group(GroupName=group_name) return response - @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: response = self.boto3_iam_client.delete_group_policy(GroupName=group_name, PolicyName=policy_name) return response - @reporter.step("Deletes the specified managed policy") def iam_delete_policy(self, policy_arn: str) -> dict: response = self.boto3_iam_client.delete_policy(PolicyArn=policy_arn) return response - @reporter.step("Deletes the specified IAM user") def iam_delete_user(self, user_name: str) -> dict: response = self.boto3_iam_client.delete_user(UserName=user_name) return response - @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: response = self.boto3_iam_client.delete_user_policy(UserName=user_name, PolicyName=policy_name) return response - @reporter.step("Removes the specified managed policy from the specified IAM group") def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: response = 
self.boto3_iam_client.detach_group_policy(GroupName=group_name, PolicyArn=policy_arn) sleep(S3_SYNC_WAIT_TIME * 10) return response - @reporter.step("Removes the specified managed policy from the specified user") def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: response = self.boto3_iam_client.detach_user_policy(UserName=user_name, PolicyArn=policy_arn) sleep(S3_SYNC_WAIT_TIME * 10) return response - @reporter.step("Returns a list of IAM users that are in the specified IAM group") def iam_get_group(self, group_name: str) -> dict: response = self.boto3_iam_client.get_group(GroupName=group_name) @@ -801,14 +757,12 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: response = self.boto3_iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name) return response - @reporter.step("Retrieves information about the specified managed policy") def iam_get_policy(self, policy_arn: str) -> dict: response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn) @@ -817,7 +771,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Retrieves information about the specified version of the specified managed policy") def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: response = self.boto3_iam_client.get_policy_version(PolicyArn=policy_arn, VersionId=version_id) @@ -826,7 +779,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Retrieves information about the specified IAM user") def iam_get_user(self, user_name: str) -> dict: response = self.boto3_iam_client.get_user(UserName=user_name) @@ -835,7 +787,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: response = self.boto3_iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name) @@ -843,14 +794,12 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Returns information about the access key IDs associated with the specified IAM user") def iam_list_access_keys(self, user_name: str) -> dict: response = self.boto3_iam_client.list_access_keys(UserName=user_name) return response - @reporter.step("Lists all managed policies that are attached to the specified IAM group") def iam_list_attached_group_policies(self, group_name: str) -> dict: response = self.boto3_iam_client.list_attached_group_policies(GroupName=group_name) @@ -858,7 +807,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists all managed policies that are attached to the specified IAM user") def iam_list_attached_user_policies(self, user_name: str) -> dict: response = self.boto3_iam_client.list_attached_user_policies(UserName=user_name) @@ -866,7 +814,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") def iam_list_entities_for_policy(self, policy_arn: str) -> dict: response = self.boto3_iam_client.list_entities_for_policy(PolicyArn=policy_arn) @@ -876,7 +823,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists the names of the inline policies that are embedded in the 
specified IAM group") def iam_list_group_policies(self, group_name: str) -> dict: response = self.boto3_iam_client.list_group_policies(GroupName=group_name) @@ -884,7 +830,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists the IAM groups") def iam_list_groups(self) -> dict: response = self.boto3_iam_client.list_groups() @@ -892,7 +837,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists the IAM groups that the specified IAM user belongs to") def iam_list_groups_for_user(self, user_name: str) -> dict: response = self.boto3_iam_client.list_groups_for_user(UserName=user_name) @@ -900,7 +844,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists all the managed policies that are available in your AWS account") def iam_list_policies(self) -> dict: response = self.boto3_iam_client.list_policies() @@ -908,7 +851,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists information about the versions of the specified managed policy") def iam_list_policy_versions(self, policy_arn: str) -> dict: response = self.boto3_iam_client.list_policy_versions(PolicyArn=policy_arn) @@ -916,7 +858,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") def iam_list_user_policies(self, user_name: str) -> dict: response = self.boto3_iam_client.list_user_policies(UserName=user_name) @@ -924,7 +865,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists the IAM users") def iam_list_users(self) -> dict: response = self.boto3_iam_client.list_users() @@ -932,54 +872,50 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: - response = self.boto3_iam_client.put_group_policy(GroupName=group_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)) + response = self.boto3_iam_client.put_group_policy( + GroupName=group_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document) + ) sleep(S3_SYNC_WAIT_TIME * 10) return response - @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: - response = self.boto3_iam_client.put_user_policy(UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)) + response = self.boto3_iam_client.put_user_policy( + UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document) + ) sleep(S3_SYNC_WAIT_TIME * 10) return response - @reporter.step("Removes the specified user from the specified group") def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: response = self.boto3_iam_client.remove_user_from_group(GroupName=group_name, UserName=user_name) return response - @reporter.step("Updates the name and/or the path of the specified IAM group") def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict: - response = self.boto3_iam_client.update_group(GroupName=group_name, NewGroupName=new_name, NewPath='/') + response = self.boto3_iam_client.update_group(GroupName=group_name, NewGroupName=new_name, NewPath="/") return response - 
@reporter.step("Updates the name and/or the path of the specified IAM user") def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict: - response = self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath='/') + response = self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath="/") return response - @reporter.step("Adds one or more tags to an IAM user") def iam_tag_user(self, user_name: str, tags: list) -> dict: tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] response = self.boto3_iam_client.tag_user(UserName=user_name, Tags=tags_json) return response - @reporter.step("List tags of IAM user") def iam_list_user_tags(self, user_name: str) -> dict: response = self.boto3_iam_client.list_user_tags(UserName=user_name) return response - @reporter.step("Removes the specified tags from the user") def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: response = self.boto3_iam_client.untag_user(UserName=user_name, TagKeys=tag_keys) - return response \ No newline at end of file + return response diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index 651be7a..f3793e0 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -4,6 +4,7 @@ from typing import Literal, Optional, Union from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum +from frostfs_testlib.utils.file_utils import TestFile def _make_objs_dict(key_names): @@ -289,7 +290,7 @@ class S3ClientWrapper(HumanReadableABC): version_id: Optional[str] = None, object_range: Optional[tuple[int, int]] = None, full_output: bool = False, - ) -> Union[dict, str]: + ) -> dict | TestFile: """Retrieves objects from S3.""" @abstractmethod @@ -400,165 +401,164 @@ class S3ClientWrapper(HumanReadableABC): # END OF OBJECT METHODS # - # IAM METHODS # @abstractmethod def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: - '''Adds the specified user to the specified group''' + """Adds the specified user to the specified group""" @abstractmethod def iam_attach_group_policy(self, group: str, policy_arn: str) -> dict: - '''Attaches the specified managed policy to the specified IAM group''' + """Attaches the specified managed policy to the specified IAM group""" @abstractmethod def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: - '''Attaches the specified managed policy to the specified user''' + """Attaches the specified managed policy to the specified user""" @abstractmethod def iam_create_access_key(self, user_name: str) -> dict: - '''Creates a new AWS secret access key and access key ID for the specified user''' + """Creates a new AWS secret access key and access key ID for the specified user""" @abstractmethod def iam_create_group(self, group_name: str) -> dict: - '''Creates a new group''' + """Creates a new group""" @abstractmethod def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: - '''Creates a new managed policy for your AWS account''' + """Creates a new managed policy for your AWS account""" @abstractmethod def iam_create_user(self, user_name: str) -> dict: - '''Creates a new IAM user for your AWS account''' + """Creates a new IAM user for your AWS account""" @abstractmethod def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: - '''Deletes the access key pair associated 
with the specified IAM user''' + """Deletes the access key pair associated with the specified IAM user""" @abstractmethod def iam_delete_group(self, group_name: str) -> dict: - '''Deletes the specified IAM group''' + """Deletes the specified IAM group""" @abstractmethod def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: - '''Deletes the specified inline policy that is embedded in the specified IAM group''' + """Deletes the specified inline policy that is embedded in the specified IAM group""" @abstractmethod def iam_delete_policy(self, policy_arn: str) -> dict: - '''Deletes the specified managed policy''' + """Deletes the specified managed policy""" @abstractmethod def iam_delete_user(self, user_name: str) -> dict: - '''Deletes the specified IAM user''' + """Deletes the specified IAM user""" @abstractmethod def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: - '''Deletes the specified inline policy that is embedded in the specified IAM user''' + """Deletes the specified inline policy that is embedded in the specified IAM user""" @abstractmethod def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: - '''Removes the specified managed policy from the specified IAM group''' + """Removes the specified managed policy from the specified IAM group""" @abstractmethod def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: - '''Removes the specified managed policy from the specified user''' + """Removes the specified managed policy from the specified user""" @abstractmethod def iam_get_group(self, group_name: str) -> dict: - '''Returns a list of IAM users that are in the specified IAM group''' + """Returns a list of IAM users that are in the specified IAM group""" @abstractmethod def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: - '''Retrieves the specified inline policy document that is embedded in the specified IAM group''' + """Retrieves the specified inline policy document that is embedded in the specified IAM group""" @abstractmethod def iam_get_policy(self, policy_arn: str) -> dict: - '''Retrieves information about the specified managed policy''' + """Retrieves information about the specified managed policy""" @abstractmethod def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: - '''Retrieves information about the specified version of the specified managed policy''' + """Retrieves information about the specified version of the specified managed policy""" @abstractmethod def iam_get_user(self, user_name: str) -> dict: - '''Retrieves information about the specified IAM user''' + """Retrieves information about the specified IAM user""" @abstractmethod def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: - '''Retrieves the specified inline policy document that is embedded in the specified IAM user''' + """Retrieves the specified inline policy document that is embedded in the specified IAM user""" @abstractmethod def iam_list_access_keys(self, user_name: str) -> dict: - '''Returns information about the access key IDs associated with the specified IAM user''' + """Returns information about the access key IDs associated with the specified IAM user""" @abstractmethod def iam_list_attached_group_policies(self, group_name: str) -> dict: - '''Lists all managed policies that are attached to the specified IAM group''' + """Lists all managed policies that are attached to the specified IAM group""" @abstractmethod def 
iam_list_attached_user_policies(self, user_name: str) -> dict: - '''Lists all managed policies that are attached to the specified IAM user''' + """Lists all managed policies that are attached to the specified IAM user""" @abstractmethod def iam_list_entities_for_policy(self, policy_arn: str) -> dict: - '''Lists all IAM users, groups, and roles that the specified managed policy is attached to''' + """Lists all IAM users, groups, and roles that the specified managed policy is attached to""" @abstractmethod def iam_list_group_policies(self, group_name: str) -> dict: - '''Lists the names of the inline policies that are embedded in the specified IAM group''' + """Lists the names of the inline policies that are embedded in the specified IAM group""" @abstractmethod def iam_list_groups(self) -> dict: - '''Lists the IAM groups''' + """Lists the IAM groups""" @abstractmethod def iam_list_groups_for_user(self, user_name: str) -> dict: - '''Lists the IAM groups that the specified IAM user belongs to''' + """Lists the IAM groups that the specified IAM user belongs to""" @abstractmethod def iam_list_policies(self) -> dict: - '''Lists all the managed policies that are available in your AWS account''' + """Lists all the managed policies that are available in your AWS account""" @abstractmethod def iam_list_policy_versions(self, policy_arn: str) -> dict: - '''Lists information about the versions of the specified managed policy''' + """Lists information about the versions of the specified managed policy""" @abstractmethod def iam_list_user_policies(self, user_name: str) -> dict: - '''Lists the names of the inline policies embedded in the specified IAM user''' + """Lists the names of the inline policies embedded in the specified IAM user""" @abstractmethod def iam_list_users(self) -> dict: - '''Lists the IAM users''' + """Lists the IAM users""" @abstractmethod def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: - '''Adds or updates an inline policy document that is embedded in the specified IAM group''' + """Adds or updates an inline policy document that is embedded in the specified IAM group""" @abstractmethod def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: - '''Adds or updates an inline policy document that is embedded in the specified IAM user''' + """Adds or updates an inline policy document that is embedded in the specified IAM user""" @abstractmethod def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: - '''Removes the specified user from the specified group''' + """Removes the specified user from the specified group""" @abstractmethod def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: - '''Updates the name and/or the path of the specified IAM group''' + """Updates the name and/or the path of the specified IAM group""" @abstractmethod def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: - '''Updates the name and/or the path of the specified IAM user''' + """Updates the name and/or the path of the specified IAM user""" @abstractmethod def iam_tag_user(self, user_name: str, tags: list) -> dict: - '''Adds one or more tags to an IAM user''' + """Adds one or more tags to an IAM user""" @abstractmethod def iam_list_user_tags(self, user_name: str) -> dict: - '''List tags of IAM user''' + """List tags of IAM user""" @abstractmethod def iam_untag_user(self, 
user_name: str, tag_keys: list) -> dict: - '''Removes the specified tags from the user''' \ No newline at end of file + """Removes the specified tags from the user""" diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 3e0806c..b84a3a2 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -16,6 +16,7 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import wait_for_success from frostfs_testlib.utils import json_utils from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output +from frostfs_testlib.utils.file_utils import TestFile logger = logging.getLogger("NeoLogger") @@ -81,7 +82,7 @@ def get_object( no_progress: bool = True, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> str: +) -> TestFile: """ GET from FrostFS. @@ -103,14 +104,14 @@ def get_object( if not write_object: write_object = str(uuid.uuid4()) - file_path = os.path.join(ASSETS_DIR, write_object) + test_file = TestFile(os.path.join(ASSETS_DIR, write_object)) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli.object.get( rpc_endpoint=endpoint, cid=cid, oid=oid, - file=file_path, + file=test_file, bearer=bearer, no_progress=no_progress, xhdr=xhdr, @@ -118,7 +119,7 @@ def get_object( timeout=timeout, ) - return file_path + return test_file @reporter.step("Get Range Hash from {endpoint}") @@ -357,7 +358,7 @@ def get_range( Returns: (str, bytes) - path to the file with range content and content of this file as bytes """ - range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4())) + test_file = TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4()))) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli.object.range( @@ -365,16 +366,16 @@ def get_range( cid=cid, oid=oid, range=range_cut, - file=range_file_path, + file=test_file, bearer=bearer, xhdr=xhdr, session=session, timeout=timeout, ) - with open(range_file_path, "rb") as file: + with open(test_file, "rb") as file: content = file.read() - return range_file_path, content + return test_file, content @reporter.step("Lock Object") diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py index 373283f..117cded 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -12,7 +12,7 @@ import requests from frostfs_testlib import reporter from frostfs_testlib.cli import GenericCli -from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE +from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE from frostfs_testlib.s3.aws_cli_client import command_options from frostfs_testlib.shell import Shell from frostfs_testlib.shell.local_shell import LocalShell @@ -20,11 +20,10 @@ from frostfs_testlib.steps.cli.object import get_object from frostfs_testlib.steps.storage_policy import get_nodes_without_object from frostfs_testlib.storage.cluster import ClusterNode, StorageNode from frostfs_testlib.testing.test_control import retry -from frostfs_testlib.utils.file_utils import get_file_hash +from frostfs_testlib.utils.file_utils import TestFile, get_file_hash logger = logging.getLogger("NeoLogger") -ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/") local_shell = LocalShell() @@ -64,10 +63,10 @@ def get_via_http_gate( logger.info(f"Request: {request}") _attach_allure_step(request, resp.status_code) - file_path = os.path.join(os.getcwd(), ASSETS_DIR, 
f"{cid}_{oid}") - with open(file_path, "wb") as file: + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")) + with open(test_file, "wb") as file: shutil.copyfileobj(resp.raw, file) - return file_path + return test_file @reporter.step("Get via Zip HTTP Gate") @@ -93,11 +92,11 @@ def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Opt logger.info(f"Request: {request}") _attach_allure_step(request, resp.status_code) - file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip") - with open(file_path, "wb") as file: + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip")) + with open(test_file, "wb") as file: shutil.copyfileobj(resp.raw, file) - with zipfile.ZipFile(file_path, "r") as zip_ref: + with zipfile.ZipFile(test_file, "r") as zip_ref: zip_ref.extractall(ASSETS_DIR) return os.path.join(os.getcwd(), ASSETS_DIR, prefix) @@ -140,10 +139,10 @@ def get_via_http_gate_by_attribute( logger.info(f"Request: {request}") _attach_allure_step(request, resp.status_code) - file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}") - with open(file_path, "wb") as file: + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}")) + with open(test_file, "wb") as file: shutil.copyfileobj(resp.raw, file) - return file_path + return test_file @reporter.step("Upload via HTTP Gate") @@ -239,7 +238,7 @@ def upload_via_http_gate_curl( @retry(max_attempts=3, sleep_interval=1) @reporter.step("Get via HTTP Gate using Curl") -def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> str: +def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> TestFile: """ This function gets given object from HTTP gate using curl utility. cid: CID to get object from @@ -247,12 +246,12 @@ def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> str: node: node for request """ request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" - file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}") + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")) curl = GenericCli("curl", node.host) - curl(f"-k ", f"{request} > {file_path}", shell=local_shell) + curl(f"-k ", f"{request} > {test_file}", shell=local_shell) - return file_path + return test_file def _attach_allure_step(request: str, status_code: int, req_type="GET"): diff --git a/src/frostfs_testlib/utils/file_utils.py b/src/frostfs_testlib/utils/file_utils.py index d238106..e01ce31 100644 --- a/src/frostfs_testlib/utils/file_utils.py +++ b/src/frostfs_testlib/utils/file_utils.py @@ -10,7 +10,39 @@ from frostfs_testlib.resources.common import ASSETS_DIR logger = logging.getLogger("NeoLogger") -def generate_file(size: int) -> str: +class TestFile(os.PathLike): + def __init__(self, path: str): + self.path = path + + def __del__(self): + logger.debug(f"Removing file {self.path}") + if os.path.exists(self.path): + os.remove(self.path) + + def __str__(self): + return self.path + + def __repr__(self): + return self.path + + def __fspath__(self): + return self.path + + +def ensure_directory(path): + directory = os.path.dirname(path) + + if not os.path.exists(directory): + os.makedirs(directory) + + +def ensure_directory_opener(path, flags): + ensure_directory(path) + return os.open(path, flags) + + +@reporter.step("Generate file with size {size}") +def generate_file(size: int) -> TestFile: """Generates a binary file with the specified size in bytes. 
Args: @@ -19,19 +51,20 @@ def generate_file(size: int) -> str: Returns: The path to the generated file. """ - file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4())) - with open(file_path, "wb") as file: + test_file = TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4()))) + with open(test_file, "wb", opener=ensure_directory_opener) as file: file.write(os.urandom(size)) - logger.info(f"File with size {size} bytes has been generated: {file_path}") + logger.info(f"File with size {size} bytes has been generated: {test_file}") - return file_path + return test_file +@reporter.step("Generate file with content of size {size}") def generate_file_with_content( size: int, - file_path: Optional[str] = None, + file_path: Optional[str | TestFile] = None, content: Optional[str] = None, -) -> str: +) -> TestFile: """Creates a new file with specified content. Args: @@ -48,20 +81,22 @@ def generate_file_with_content( content = os.urandom(size) mode = "wb" + test_file = None if not file_path: - file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) + elif isinstance(file_path, TestFile): + test_file = file_path else: - if not os.path.exists(os.path.dirname(file_path)): - os.makedirs(os.path.dirname(file_path)) + test_file = TestFile(file_path) - with open(file_path, mode) as file: + with open(test_file, mode, opener=ensure_directory_opener) as file: file.write(content) - return file_path + return test_file @reporter.step("Get File Hash") -def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str: +def get_file_hash(file_path: str | TestFile, len: Optional[int] = None, offset: Optional[int] = None) -> str: """Generates hash for the specified file. Args: @@ -88,7 +123,7 @@ def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[in @reporter.step("Concatenation set of files to one file") -def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str: +def concat_files(file_paths: list[str | TestFile], resulting_file_path: Optional[str | TestFile] = None) -> TestFile: """Concatenates several files into a single file. Args: @@ -98,16 +133,24 @@ def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> Returns: Path to the resulting file. """ + + test_file = None if not resulting_file_path: - resulting_file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - with open(resulting_file_path, "wb") as f: + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) + elif isinstance(resulting_file_path, TestFile): + test_file = resulting_file_path + else: + test_file = TestFile(resulting_file_path) + + with open(test_file, "wb", opener=ensure_directory_opener) as f: for file in file_paths: with open(file, "rb") as part_file: f.write(part_file.read()) - return resulting_file_path + return test_file -def split_file(file_path: str, parts: int) -> list[str]: +@reporter.step("Split file to {parts} parts") +def split_file(file_path: str | TestFile, parts: int) -> list[TestFile]: """Splits specified file into several specified number of parts. Each part is saved under name `{original_file}_part_{i}`. 
@@ -129,7 +172,7 @@ def split_file(file_path: str, parts: int) -> list[str]: part_file_paths = [] for content_offset in range(0, content_size + 1, chunk_size): part_file_name = f"{file_path}_part_{part_id}" - part_file_paths.append(part_file_name) + part_file_paths.append(TestFile(part_file_name)) with open(part_file_name, "wb") as out_file: out_file.write(content[content_offset : content_offset + chunk_size]) part_id += 1 @@ -137,9 +180,8 @@ def split_file(file_path: str, parts: int) -> list[str]: return part_file_paths -def get_file_content( - file_path: str, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None -) -> Any: +@reporter.step("Get file content") +def get_file_content(file_path: str | TestFile, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None) -> Any: """Returns content of specified file. Args: From f1b2fbd47bb8fed982ac3aae2a9065aa14618e5e Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Tue, 25 Jun 2024 02:31:14 +0300 Subject: [PATCH 166/274] [#250] Adjustments for tests optimization Signed-off-by: a.berezin --- src/frostfs_testlib/s3/aws_cli_client.py | 5 ----- src/frostfs_testlib/s3/boto3_client.py | 10 ++++------ src/frostfs_testlib/steps/s3/s3_helper.py | 1 - .../storage/controllers/cluster_state_controller.py | 6 ++++-- src/frostfs_testlib/testing/parallel.py | 6 ++++-- 5 files changed, 12 insertions(+), 16 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index f6488f5..3568037 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -91,7 +91,6 @@ class AwsCliClient(S3ClientWrapper): if location_constraint: cmd += f" --create-bucket-configuration LocationConstraint={location_constraint}" self.local_shell.exec(cmd) - sleep(S3_SYNC_WAIT_TIME) return bucket @@ -106,7 +105,6 @@ class AwsCliClient(S3ClientWrapper): def delete_bucket(self, bucket: str) -> None: cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd, command_options) - sleep(S3_SYNC_WAIT_TIME) @reporter.step("Head bucket S3") def head_bucket(self, bucket: str) -> None: @@ -397,7 +395,6 @@ class AwsCliClient(S3ClientWrapper): ) output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME) return response @reporter.step("Delete object S3") @@ -408,7 +405,6 @@ class AwsCliClient(S3ClientWrapper): f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd, command_options).stdout - sleep(S3_SYNC_WAIT_TIME) return self._to_json(output) @reporter.step("Delete object versions S3") @@ -435,7 +431,6 @@ class AwsCliClient(S3ClientWrapper): f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd, command_options).stdout - sleep(S3_SYNC_WAIT_TIME) return self._to_json(output) @reporter.step("Delete object versions S3 without delete markers") diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index bdf7a9f..a8a7828 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -134,7 +134,6 @@ class Boto3ClientWrapper(S3ClientWrapper): s3_bucket = self.boto3_client.create_bucket(**params) log_command_execution(f"Created S3 bucket {bucket}", s3_bucket) - sleep(S3_SYNC_WAIT_TIME) return bucket 
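# With the fixed sleep(S3_SYNC_WAIT_TIME) calls removed, synchronization
# becomes the caller's concern. A minimal sketch of one replacement pattern,
# assuming wait_for_success from this library retries the wrapped callable
# until it stops raising; the timeouts are example values.
from frostfs_testlib.s3.interfaces import S3ClientWrapper
from frostfs_testlib.testing.test_control import wait_for_success

@wait_for_success(60, 5, title="Wait for bucket {bucket} to disappear")
def await_bucket_removed(s3_client: S3ClientWrapper, bucket: str) -> None:
    # list_buckets() returns plain bucket names, so a membership check suffices.
    assert bucket not in s3_client.list_buckets(), f"Bucket {bucket} is still listed"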
@reporter.step("List buckets S3") @@ -155,7 +154,6 @@ class Boto3ClientWrapper(S3ClientWrapper): def delete_bucket(self, bucket: str) -> None: response = self.boto3_client.delete_bucket(Bucket=bucket) log_command_execution("S3 Delete bucket result", response) - sleep(S3_SYNC_WAIT_TIME) @reporter.step("Head bucket S3") @report_error @@ -364,7 +362,6 @@ class Boto3ClientWrapper(S3ClientWrapper): params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} response = self.boto3_client.delete_object(**params) log_command_execution("S3 Delete object result", response) - sleep(S3_SYNC_WAIT_TIME) return response @reporter.step("Delete objects S3") @@ -375,7 +372,6 @@ class Boto3ClientWrapper(S3ClientWrapper): assert ( "Errors" not in response ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' - sleep(S3_SYNC_WAIT_TIME) return response @reporter.step("Delete object versions S3") @@ -413,8 +409,10 @@ class Boto3ClientWrapper(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> list: - # pytest.skip("Method put_object_acl is not supported by boto3 client") - raise NotImplementedError("Unsupported for boto3 client") + params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} + response = self.boto3_client.put_object_acl(**params) + log_command_execution("S3 put object ACL", response) + return response.get("Grants") @reporter.step("Get object ACL") @report_error diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index ab0cee3..9b85766 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -47,7 +47,6 @@ def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: Versi if status == VersioningStatus.UNDEFINED: return - s3_client.get_bucket_versioning_status(bucket) s3_client.put_bucket_versioning(bucket, status=status) bucket_status = s3_client.get_bucket_versioning_status(bucket) assert bucket_status == status.value, f"Expected {bucket_status} status. 
Got {status.value}" diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 4003dfd..3c6c268 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -440,9 +440,11 @@ class ClusterStateController: self.await_node_status(status, wallet, cluster_node) @wait_for_success(80, 8, title="Wait for node status become {status}") - def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode): + def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode, checker_node: ClusterNode = None): frostfs_cli = FrostfsCli(self.shell, FROSTFS_CLI_EXEC, wallet.config_path) - netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(cluster_node.storage_node.get_rpc_endpoint()).stdout) + if not checker_node: + checker_node = cluster_node + netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(checker_node.storage_node.get_rpc_endpoint()).stdout) netmap = [node for node in netmap if cluster_node.host_ip == node.node] if status == NodeStatus.OFFLINE: assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline" diff --git a/src/frostfs_testlib/testing/parallel.py b/src/frostfs_testlib/testing/parallel.py index 1c30cec..9c36118 100644 --- a/src/frostfs_testlib/testing/parallel.py +++ b/src/frostfs_testlib/testing/parallel.py @@ -2,6 +2,8 @@ import itertools from concurrent.futures import Future, ThreadPoolExecutor from typing import Callable, Collection, Optional, Union +MAX_WORKERS = 50 + def parallel( fn: Union[Callable, list[Callable]], @@ -54,7 +56,7 @@ def _run_by_fn_list(fn_list: list[Callable], *args, **kwargs) -> list[Future]: futures: list[Future] = [] - with ThreadPoolExecutor(max_workers=len(fn_list)) as executor: + with ThreadPoolExecutor(max_workers=min(len(fn_list), MAX_WORKERS)) as executor: for fn in fn_list: task_args = _get_args(*args) task_kwargs = _get_kwargs(**kwargs) @@ -67,7 +69,7 @@ def _run_by_fn_list(fn_list: list[Callable], *args, **kwargs) -> list[Future]: def _run_by_items(fn: Callable, parallel_items: Collection, *args, **kwargs) -> list[Future]: futures: list[Future] = [] - with ThreadPoolExecutor(max_workers=len(parallel_items)) as executor: + with ThreadPoolExecutor(max_workers=min(len(parallel_items), MAX_WORKERS)) as executor: for item in parallel_items: task_args = _get_args(*args) task_kwargs = _get_kwargs(**kwargs) From da16f3c3a52707a1c7e9c30835694f778f6c3aec Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Fri, 21 Jun 2024 10:41:28 +0300 Subject: [PATCH 167/274] [#248] add metrics methods --- src/frostfs_testlib/steps/metrics.py | 45 +++++++++++++++++++ .../storage/dataclasses/metrics.py | 10 ++--- 2 files changed, 50 insertions(+), 5 deletions(-) create mode 100644 src/frostfs_testlib/steps/metrics.py diff --git a/src/frostfs_testlib/steps/metrics.py b/src/frostfs_testlib/steps/metrics.py new file mode 100644 index 0000000..d999171 --- /dev/null +++ b/src/frostfs_testlib/steps/metrics.py @@ -0,0 +1,45 @@ +import re + +from frostfs_testlib import reporter +from frostfs_testlib.testing.test_control import wait_for_success +from frostfs_testlib.storage.cluster import ClusterNode + + +@reporter.step("Check metrics result") +@wait_for_success(interval=10) +def check_metrics_counter( + cluster_nodes: list[ClusterNode], + operator: str = "==", + counter_exp: int = 0, 
+ parse_from_command: bool = False, + **metrics_greps: str, +): + counter_act = 0 + for cluster_node in cluster_nodes: + counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps) + assert eval( + f"{counter_act} {operator} {counter_exp}" + ), f"Expected: {counter_exp}, Actual: {counter_act} in node: {cluster_node}" + + +@reporter.step("Get metrics value from node: {node}") +def get_metrics_value(node: ClusterNode, parse_from_command: bool = False, **metrics_greps: str): + try: + command_result = node.metrics.storage.get_metrics_search_by_greps(**metrics_greps) + if parse_from_command: + metrics_counter = calc_metrics_count_from_stdout(command_result.stdout, **metrics_greps) + else: + metrics_counter = calc_metrics_count_from_stdout(command_result.stdout) + except RuntimeError as e: + metrics_counter = 0 + + return metrics_counter + + +@reporter.step("Parse metrics count and calc sum of result") +def calc_metrics_count_from_stdout(metric_result_stdout: str, command: str = None): + if command: + result = re.findall(rf"{command}\s*([\d.e+-]+)", metric_result_stdout) + else: + result = re.findall(r"}\s*([\d.e+-]+)", metric_result_stdout) + return sum(map(lambda x: int(float(x)), result)) diff --git a/src/frostfs_testlib/storage/dataclasses/metrics.py b/src/frostfs_testlib/storage/dataclasses/metrics.py index c79dcf8..81e757c 100644 --- a/src/frostfs_testlib/storage/dataclasses/metrics.py +++ b/src/frostfs_testlib/storage/dataclasses/metrics.py @@ -16,11 +16,6 @@ class StorageMetrics: self.host = host self.metrics_endpoint = metrics_endpoint - def get_metric_container(self, metric: str, cid: str) -> CommandResult: - shell = self.host.get_shell() - result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {metric} |grep {cid}") - return result - def get_metrics_search_by_greps(self, **greps) -> CommandResult: """ Get a metrics, search by: cid, metric_type, shard_id etc. 
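# A minimal usage sketch for the new metrics helpers above; the metric name
# and the grep keys are illustrative values, not taken from this patch.
from frostfs_testlib.steps.metrics import check_metrics_counter
from frostfs_testlib.storage.cluster import ClusterNode

def assert_container_object_count(nodes: list[ClusterNode], cid: str, expected: int) -> None:
    # Sums the grepped counter over all nodes, then asserts "actual >= expected".
    check_metrics_counter(nodes, operator=">=", counter_exp=expected, command="frostfs_node_engine_container_objects_total", cid=cid)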
@@ -34,3 +29,8 @@ class StorageMetrics: additional_greps = " |grep ".join([grep_command for grep_command in greps.values()]) result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}") return result + + def get_all_metrics(self) -> CommandResult: + shell = self.host.get_shell() + result = shell.exec(f"curl -s {self.metrics_endpoint}") + return result From c9e4c2c7bbded6a745e981e75f7cd1d234e74b22 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Tue, 25 Jun 2024 18:56:10 +0300 Subject: [PATCH 168/274] [#251] Update get object nodes command call Signed-off-by: a.berezin --- src/frostfs_testlib/cli/frostfs_cli/object.py | 1 + src/frostfs_testlib/steps/cli/object.py | 25 +++++++++++-------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 5d5bd91..55c92be 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -362,6 +362,7 @@ class FrostfsCliObject(CliCommand): trace: bool = False, root: bool = False, verify_presence_all: bool = False, + json: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index b84a3a2..7de7a71 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -722,21 +722,27 @@ def get_object_nodes( cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config) - result_object_nodes = cli.object.nodes( + response = cli.object.nodes( rpc_endpoint=endpoint, cid=cid, oid=oid, bearer=bearer, ttl=1 if is_direct else None, + json=True, xhdr=xhdr, timeout=timeout, verify_presence_all=verify_presence_all, ) - parsing_output = parse_cmd_table(result_object_nodes.stdout, "|") - list_object_nodes = [ - node for node in parsing_output if node["should_contain_object"] == "true" and node["actually_contains_object"] == "true" - ] + response_json = json.loads(response.stdout) + # Currently, the command will show expected and confirmed nodes. 
+ # And we (currently) count only nodes which are both expected and confirmed + object_nodes_id = { + required_node + for data_object in response_json["data_objects"] + for required_node in data_object["required_nodes"] + if required_node in data_object["confirmed_nodes"] + } netmap_nodes_list = parse_netmap_output( cli.netmap.snapshot( @@ -745,14 +751,11 @@ def get_object_nodes( ).stdout ) netmap_nodes = [ - netmap_node - for object_node in list_object_nodes - for netmap_node in netmap_nodes_list - if object_node["node_id"] == netmap_node.node_id + netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id ] - result = [ + object_nodes = [ cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip ] - return result + return object_nodes From 3a4204f2e4d9180b32f79a630d4d7ed48ef79657 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Fri, 28 Jun 2024 15:18:20 +0300 Subject: [PATCH 169/274] [#253] Update S3 clients and permission matrixes Signed-off-by: a.berezin --- src/frostfs_testlib/resources/s3_acl_grants.py | 6 +++--- src/frostfs_testlib/s3/aws_cli_client.py | 8 ++++---- src/frostfs_testlib/s3/boto3_client.py | 8 ++++---- src/frostfs_testlib/utils/file_utils.py | 11 ++++++++--- src/frostfs_testlib/utils/string_utils.py | 14 ++++++++++++++ 5 files changed, 33 insertions(+), 14 deletions(-) diff --git a/src/frostfs_testlib/resources/s3_acl_grants.py b/src/frostfs_testlib/resources/s3_acl_grants.py index 37005e8..a716bc5 100644 --- a/src/frostfs_testlib/resources/s3_acl_grants.py +++ b/src/frostfs_testlib/resources/s3_acl_grants.py @@ -4,6 +4,6 @@ ALL_USERS_GROUP_READ_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROU CANONICAL_USER_FULL_CONTROL_GRANT = {"Grantee": {"Type": "CanonicalUser"}, "Permission": "FULL_CONTROL"} # https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl -PRIVATE_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT] -PUBLIC_READ_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT, ALL_USERS_GROUP_READ_GRANT] -PUBLIC_READ_WRITE_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT, ALL_USERS_GROUP_WRITE_GRANT, ALL_USERS_GROUP_READ_GRANT] +PRIVATE_GRANTS = [] +PUBLIC_READ_GRANTS = [ALL_USERS_GROUP_READ_GRANT] +PUBLIC_READ_WRITE_GRANTS = [ALL_USERS_GROUP_WRITE_GRANT, ALL_USERS_GROUP_READ_GRANT] diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 3568037..ae9254c 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -1,7 +1,6 @@ import json import logging import os -import uuid from datetime import datetime from time import sleep from typing import Literal, Optional, Union @@ -11,6 +10,7 @@ from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, R from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.shell import CommandOptions from frostfs_testlib.shell.local_shell import LocalShell +from frostfs_testlib.utils import string_utils # TODO: Refactor this code to use shell instead of _cmd_run from frostfs_testlib.utils.cli_utils import _configure_aws_cli @@ -68,7 +68,7 @@ class AwsCliClient(S3ClientWrapper): location_constraint: Optional[str] = None, ) -> str: if bucket is None: - bucket = str(uuid.uuid4()) + bucket = string_utils.unique_name("bucket-") if object_lock_enabled_for_bucket is None: object_lock = "" @@ -229,7 +229,7 @@ class 
AwsCliClient(S3ClientWrapper): if bucket is None: bucket = source_bucket if key is None: - key = os.path.join(os.getcwd(), str(uuid.uuid4())) + key = string_utils.unique_name("copy-object-") copy_source = f"{source_bucket}/{source_key}" cmd = ( @@ -315,7 +315,7 @@ class AwsCliClient(S3ClientWrapper): object_range: Optional[tuple[int, int]] = None, full_output: bool = False, ) -> dict | TestFile: - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-"))) version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} " diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index a8a7828..150570c 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -1,7 +1,6 @@ import json import logging import os -import uuid from datetime import datetime from functools import wraps from time import sleep @@ -16,6 +15,7 @@ from mypy_boto3_s3 import S3Client from frostfs_testlib import reporter from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict +from frostfs_testlib.utils import string_utils # TODO: Refactor this code to use shell instead of _cmd_run from frostfs_testlib.utils.cli_utils import _configure_aws_cli, log_command_execution @@ -115,7 +115,7 @@ class Boto3ClientWrapper(S3ClientWrapper): location_constraint: Optional[str] = None, ) -> str: if bucket is None: - bucket = str(uuid.uuid4()) + bucket = string_utils.unique_name("bucket-") params = {"Bucket": bucket} if object_lock_enabled_for_bucket is not None: @@ -439,7 +439,7 @@ class Boto3ClientWrapper(S3ClientWrapper): if bucket is None: bucket = source_bucket if key is None: - key = os.path.join(os.getcwd(), str(uuid.uuid4())) + key = string_utils.unique_name("copy-object-") copy_source = f"{source_bucket}/{source_key}" params = { @@ -476,7 +476,7 @@ class Boto3ClientWrapper(S3ClientWrapper): if full_output: return response - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-"))) with open(test_file, "wb") as file: chunk = response["Body"].read(1024) while chunk: diff --git a/src/frostfs_testlib/utils/file_utils.py b/src/frostfs_testlib/utils/file_utils.py index e01ce31..c2b497f 100644 --- a/src/frostfs_testlib/utils/file_utils.py +++ b/src/frostfs_testlib/utils/file_utils.py @@ -6,6 +6,7 @@ from typing import Any, Optional from frostfs_testlib import reporter from frostfs_testlib.resources.common import ASSETS_DIR +from frostfs_testlib.utils import string_utils logger = logging.getLogger("NeoLogger") @@ -41,7 +42,9 @@ def ensure_directory_opener(path, flags): return os.open(path, flags) -@reporter.step("Generate file with size {size}") +# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps +# Use object_size dt in future as argument +@reporter.step("Generate file") def generate_file(size: int) -> TestFile: """Generates a binary file with the specified size in bytes. @@ -51,7 +54,7 @@ def generate_file(size: int) -> TestFile: Returns: The path to the generated file. 
""" - test_file = TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4()))) + test_file = TestFile(os.path.join(ASSETS_DIR, string_utils.unique_name("object-"))) with open(test_file, "wb", opener=ensure_directory_opener) as file: file.write(os.urandom(size)) logger.info(f"File with size {size} bytes has been generated: {test_file}") @@ -59,7 +62,9 @@ def generate_file(size: int) -> TestFile: return test_file -@reporter.step("Generate file with content of size {size}") +# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps +# Use object_size dt in future as argument +@reporter.step("Generate file with content") def generate_file_with_content( size: int, file_path: Optional[str | TestFile] = None, diff --git a/src/frostfs_testlib/utils/string_utils.py b/src/frostfs_testlib/utils/string_utils.py index a80192c..d8e91a4 100644 --- a/src/frostfs_testlib/utils/string_utils.py +++ b/src/frostfs_testlib/utils/string_utils.py @@ -1,12 +1,26 @@ import random import re import string +from datetime import datetime ONLY_ASCII_LETTERS = string.ascii_letters DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits NON_DIGITS_AND_LETTERS = string.punctuation +def unique_name(prefix: str = ""): + """ + Generate unique short name of anything with prefix. + This should be unique in scope of multiple runs + + Args: + prefix: prefix for unique name generation + Returns: + unique name string + """ + return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}" + + def random_string(length: int = 5, source: str = ONLY_ASCII_LETTERS): """ Generate random string from source letters list From f4460194bcc24d22d698d0409e5336fe25390b1f Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Fri, 28 Jun 2024 17:13:35 +0300 Subject: [PATCH 170/274] [#252] add filter priority to get_filtered_logs method --- src/frostfs_testlib/hosting/docker_host.py | 1 + src/frostfs_testlib/hosting/interfaces.py | 3 +++ src/frostfs_testlib/steps/metrics.py | 2 +- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 3c9883a..0fb5af0 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -240,6 +240,7 @@ class DockerHost(Host): until: Optional[datetime] = None, unit: Optional[str] = None, exclude_filter: Optional[str] = None, + priority: Optional[str] = None ) -> str: client = self._get_docker_client() filtered_logs = "" diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 13051e2..36c2804 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -297,6 +297,7 @@ class Host(ABC): until: Optional[datetime] = None, unit: Optional[str] = None, exclude_filter: Optional[str] = None, + priority: Optional[str] = None ) -> str: """Get logs from host filtered by regex. @@ -305,6 +306,8 @@ class Host(ABC): since: If set, limits the time from which logs should be collected. Must be in UTC. until: If set, limits the time until which logs should be collected. Must be in UTC. unit: required unit. + priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher. + For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0. Returns: Found entries as str if any found. 
diff --git a/src/frostfs_testlib/steps/metrics.py b/src/frostfs_testlib/steps/metrics.py index d999171..29e49d4 100644 --- a/src/frostfs_testlib/steps/metrics.py +++ b/src/frostfs_testlib/steps/metrics.py @@ -19,7 +19,7 @@ def check_metrics_counter( counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps) assert eval( f"{counter_act} {operator} {counter_exp}" - ), f"Expected: {counter_exp}, Actual: {counter_act} in node: {cluster_node}" + ), f"Expected: {counter_exp} {operator} Actual: {counter_act} in node: {cluster_node}"
From 376499a7e8c5dc2381da834b4c2cd7221da04371 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Fri, 28 Jun 2024 16:41:57 +0300 Subject: [PATCH 171/274] [#254] Added change for EC policy Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/netmap_parser.py | 2 ++ src/frostfs_testlib/storage/dataclasses/storage_object_info.py | 2 ++ 2 files changed, 4 insertions(+)
diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py index 94d12b8..23ac4da 100644 --- a/src/frostfs_testlib/cli/netmap_parser.py +++ b/src/frostfs_testlib/cli/netmap_parser.py @@ -15,6 +15,8 @@ class NetmapParser: "epoch_duration": r"Epoch duration: (?P<epoch_duration>\d+)", "inner_ring_candidate_fee": r"Inner Ring candidate fee: (?P<inner_ring_candidate_fee>\d+)", "maximum_object_size": r"Maximum object size: (?P<maximum_object_size>\d+)", + "maximum_count_of_data_shards": r"Maximum count of data shards: (?P<maximum_count_of_data_shards>\d+)", + "maximum_count_of_parity_shards": r"Maximum count of parity shards: (?P<maximum_count_of_parity_shards>\d+)", "withdrawal_fee": r"Withdrawal fee: (?P<withdrawal_fee>\d+)", "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?P<homomorphic_hashing_disabled>true|false)", "maintenance_mode_allowed": r"Maintenance mode allowed: (?P<maintenance_mode_allowed>true|false)",
diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index 28fdaa5..1ecb300 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -70,6 +70,8 @@ class NodeNetInfo: epoch_duration: str = None inner_ring_candidate_fee: str = None maximum_object_size: str = None + maximum_count_of_data_shards: str = None + maximum_count_of_parity_shards: str = None withdrawal_fee: str = None homomorphic_hashing_disabled: str = None maintenance_mode_allowed: str = None
From 429698944e94cc7bcc40e603aa868e9ba3d12481 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 3 Jul 2024 12:02:40 +0300 Subject: [PATCH 172/274] [#256] Allow to set mix of policies for containers and buckets Signed-off-by: a.berezin --- src/frostfs_testlib/load/load_config.py | 65 ++++++++++++------- tests/test_load_config.py | 84 ++++++++++++++++--------- 2 files changed, 95 insertions(+), 54 deletions(-)
diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 1128096..767e9f2 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -25,6 +25,16 @@ def convert_time_to_seconds(time: int | str | None) -> int: return seconds +def force_list(input: str | list[str]): + if input is None: + return None + + if isinstance(input, list): + return list(map(str.strip, input)) + + return [input.strip()] + + class LoadType(Enum): gRPC = "grpc" S3 = "s3" @@ -142,8 +152,29 @@ class K6ProcessAllocationStrategy(Enum): PER_ENDPOINT = "PER_ENDPOINT" +class MetaConfig: + def _get_field_formatter(self, field_name: str) -> Callable | None: + data_fields = 
fields(self) + formatters = [ + field.metadata["formatter"] + for field in data_fields + if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None + ] + if formatters: + return formatters[0] + + return None + + def __setattr__(self, field_name, value): + formatter = self._get_field_formatter(field_name) + if formatter: + value = formatter(value) + + super().__setattr__(field_name, value) + + @dataclass -class Preset: +class Preset(MetaConfig): # ------ COMMON ------ # Amount of objects which should be created objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None, False) @@ -158,13 +189,13 @@ class Preset: # Amount of containers which should be created containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None, False) # Container placement policy for containers for gRPC - container_placement_policy: Optional[str] = metadata_field(grpc_preset_scenarios, "policy", None, False) + container_placement_policy: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "policy", None, False, formatter=force_list) # ------ S3 ------ # Amount of buckets which should be created buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None, False) # S3 region (AKA placement policy for S3 buckets) - s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None, False) + s3_location: Optional[list[str]] = metadata_field(s3_preset_scenarios, "location", None, False, formatter=force_list) # Delay between containers creation and object upload for preset object_upload_delay: Optional[int] = metadata_field(all_load_scenarios, "sleep", None, False) @@ -177,7 +208,7 @@ class Preset: @dataclass -class PrometheusParams: +class PrometheusParams(MetaConfig): # Prometheus server URL server_url: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False) # Prometheus trend stats @@ -187,7 +218,7 @@ class PrometheusParams: @dataclass -class LoadParams: +class LoadParams(MetaConfig): # ------- CONTROL PARAMS ------- # Load type can be gRPC, HTTP, S3. 
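# Behavior sketch for the force_list formatter defined above; the values
# mirror the parsing unit tests added later in this patch:
#   force_list(None)           -> None
#   force_list("  A  ")        -> ["A"]
#   force_list(" A , B ")      -> ["A , B"]    (a single string is never split on commas)
#   force_list(["A C ", " B"]) -> ["A C", "B"]
# Each resulting element is then rendered as a repeated preset flag, e.g.
#   --policy 'A C' --policy 'B'   or   --location 'A' --location 'B'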
load_type: LoadType @@ -412,6 +443,11 @@ class LoadParams: # For preset calls, bool values are passed with just -- if the value is True return f"--{meta_field.metadata['preset_argument']}" if meta_field.value else "" + if isinstance(meta_field.value, list): + return ( + " ".join(f"--{meta_field.metadata['preset_argument']} '{value}'" for value in meta_field.value) if meta_field.value else "" + ) + return f"--{meta_field.metadata['preset_argument']} '{meta_field.value}'" @staticmethod @@ -431,25 +467,6 @@ class LoadParams: return fields_with_data or [] - def _get_field_formatter(self, field_name: str) -> Callable | None: - data_fields = fields(self) - formatters = [ - field.metadata["formatter"] - for field in data_fields - if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None - ] - if formatters: - return formatters[0] - - return None - - def __setattr__(self, field_name, value): - formatter = self._get_field_formatter(field_name) - if formatter: - value = formatter(value) - - super().__setattr__(field_name, value) - def __str__(self) -> str: load_type_str = self.scenario.value if self.scenario else self.load_type.value # TODO: migrate load_params defaults to testlib diff --git a/tests/test_load_config.py b/tests/test_load_config.py index 62339f6..883b1f2 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -3,14 +3,7 @@ from typing import Any, get_args import pytest -from frostfs_testlib.load.load_config import ( - EndpointSelectionStrategy, - LoadParams, - LoadScenario, - LoadType, - Preset, - ReadFrom, -) +from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType, Preset, ReadFrom from frostfs_testlib.load.runners import DefaultRunner from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME from frostfs_testlib.storage.cluster import ClusterNode @@ -99,9 +92,7 @@ class TestLoadConfig: def test_load_controller_string_representation(self, load_params: LoadParams): load_params.endpoint_selection_strategy = EndpointSelectionStrategy.ALL load_params.object_size = 512 - background_load_controller = BackgroundLoadController( - "tmp", load_params, "wallet", None, None, DefaultRunner(None) - ) + background_load_controller = BackgroundLoadController("tmp", load_params, None, None, DefaultRunner(None)) expected = "grpc 512 KiB, writers=7, readers=7, deleters=8" assert f"{background_load_controller}" == expected assert repr(background_load_controller) == expected @@ -141,7 +132,7 @@ class TestLoadConfig: "--out 'pregen_json'", "--workers '7'", "--containers '16'", - "--policy 'container_placement_policy'", + "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", "--ignore-errors", "--sleep '19'", "--local", @@ -173,7 +164,7 @@ class TestLoadConfig: "--out 'pregen_json'", "--workers '7'", "--containers '16'", - "--policy 'container_placement_policy'", + "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", "--ignore-errors", "--sleep '19'", "--local", @@ -214,7 +205,7 @@ class TestLoadConfig: "--out 'pregen_json'", "--workers '7'", "--buckets '13'", - "--location 's3_location'", + "--location 's3_location' --location 's3_location_2'", "--ignore-errors", "--sleep '19'", "--acl 'acl'", @@ -248,7 +239,7 @@ class TestLoadConfig: "--out 'pregen_json'", "--workers '7'", "--buckets '13'", - "--location 's3_location'", + "--location 's3_location' --location 's3_location_2'", "--ignore-errors", "--sleep '19'", 
"--acl 'acl'", @@ -288,7 +279,7 @@ class TestLoadConfig: "--out 'pregen_json'", "--workers '7'", "--buckets '13'", - "--location 's3_location'", + "--location 's3_location' --location 's3_location_2'", "--ignore-errors", "--sleep '19'", "--acl 'acl'", @@ -329,7 +320,7 @@ class TestLoadConfig: "--out 'pregen_json'", "--workers '7'", "--containers '16'", - "--policy 'container_placement_policy'", + "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", "--ignore-errors", "--sleep '19'", "--acl 'acl'", @@ -362,12 +353,13 @@ class TestLoadConfig: "--out 'pregen_json'", "--workers '7'", "--containers '16'", - "--policy 'container_placement_policy'", + "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", "--ignore-errors", "--sleep '19'", "--acl 'acl'", ] expected_env_vars = { + "CONFIG_DIR": "config_dir", "CONFIG_FILE": "config_file", "DURATION": 9, "WRITE_OBJ_SIZE": 11, @@ -380,12 +372,49 @@ class TestLoadConfig: "DELETERS": 8, "READ_AGE": 8, "STREAMING": 9, + "MAX_TOTAL_SIZE_GB": 17, "PREGEN_JSON": "pregen_json", } self._check_preset_params(load_params, expected_preset_args) self._check_env_vars(load_params, expected_env_vars) + @pytest.mark.parametrize( + "input, value, params", + [ + (["A C ", " B"], ["A C", "B"], [f"--policy 'A C' --policy 'B'"]), + (" A ", ["A"], ["--policy 'A'"]), + (" A , B ", ["A , B"], ["--policy 'A , B'"]), + ([" A", "B "], ["A", "B"], ["--policy 'A' --policy 'B'"]), + (None, None, []), + ], + ) + def test_grpc_list_parsing_formatter(self, input, value, params): + load_params = LoadParams(LoadType.gRPC) + load_params.preset = Preset() + load_params.preset.container_placement_policy = input + assert load_params.preset.container_placement_policy == value + + self._check_preset_params(load_params, params) + + @pytest.mark.parametrize( + "input, value, params", + [ + (["A C ", " B"], ["A C", "B"], [f"--location 'A C' --location 'B'"]), + (" A ", ["A"], ["--location 'A'"]), + (" A , B ", ["A , B"], ["--location 'A , B'"]), + ([" A", "B "], ["A", "B"], ["--location 'A' --location 'B'"]), + (None, None, []), + ], + ) + def test_s3_list_parsing_formatter(self, input, value, params): + load_params = LoadParams(LoadType.S3) + load_params.preset = Preset() + load_params.preset.s3_location = input + assert load_params.preset.s3_location == value + + self._check_preset_params(load_params, params) + @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True) def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): expected_env_vars = { @@ -592,6 +621,7 @@ class TestLoadConfig: "--acl ''", ] expected_env_vars = { + "CONFIG_DIR": "", "CONFIG_FILE": "", "DURATION": 0, "WRITE_OBJ_SIZE": 0, @@ -599,6 +629,7 @@ class TestLoadConfig: "K6_OUT": "", "K6_MIN_ITERATION_DURATION": "", "K6_SETUP_TIMEOUT": "", + "MAX_TOTAL_SIZE_GB": 0, "WRITERS": 0, "READERS": 0, "DELETERS": 0, @@ -689,9 +720,7 @@ class TestLoadConfig: value = getattr(dataclass, field.name) assert value is not None, f"{field.name} is not None" - def _get_filled_load_params( - self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False - ) -> LoadParams: + def _get_filled_load_params(self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False) -> LoadParams: load_type_map = { LoadScenario.S3: LoadType.S3, LoadScenario.S3_CAR: LoadType.S3, @@ -708,13 +737,12 @@ class TestLoadConfig: meta_fields = self._get_meta_fields(load_params) for field in meta_fields: - if ( - 
getattr(field.instance, field.field.name) is None - and load_params.scenario in field.field.metadata["applicable_scenarios"] - ): + if getattr(field.instance, field.field.name) is None and load_params.scenario in field.field.metadata["applicable_scenarios"]: value_to_set_map = { int: 0 if set_emtpy else len(field.field.name), + float: 0 if set_emtpy else len(field.field.name), str: "" if set_emtpy else field.field.name, + list[str]: "" if set_emtpy else [field.field.name, f"{field.field.name}_2"], bool: False if set_emtpy else True, } value_to_set = value_to_set_map[field.field_type] @@ -727,11 +755,7 @@ class TestLoadConfig: def _get_meta_fields(self, instance): data_fields = fields(instance) - fields_with_data = [ - MetaTestField(field, self._get_actual_field_type(field), instance) - for field in data_fields - if field.metadata - ] + fields_with_data = [MetaTestField(field, self._get_actual_field_type(field), instance) for field in data_fields if field.metadata] for field in data_fields: actual_field_type = self._get_actual_field_type(field) From 996f92ffa79668d479070e3793d488722b9a9db2 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Wed, 10 Jul 2024 17:17:27 +0300 Subject: [PATCH 173/274] [#259] Improve logging of boto3 client requests Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/s3/boto3_client.py | 221 +++++++++++++------------ src/frostfs_testlib/utils/cli_utils.py | 15 +- 2 files changed, 128 insertions(+), 108 deletions(-) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 150570c..5686b78 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -18,7 +18,7 @@ from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _ma from frostfs_testlib.utils import string_utils # TODO: Refactor this code to use shell instead of _cmd_run -from frostfs_testlib.utils.cli_utils import _configure_aws_cli, log_command_execution +from frostfs_testlib.utils.cli_utils import log_command_execution from frostfs_testlib.utils.file_utils import TestFile logger = logging.getLogger("NeoLogger") @@ -34,7 +34,15 @@ def report_error(func): try: return func(*a, **kw) except ClientError as err: - log_command_execution("Result", str(err)) + url = None + params = {"args": a, "kwargs": kw} + + if isinstance(a[0], Boto3ClientWrapper): + client: Boto3ClientWrapper = a[0] + url = client.s3gate_endpoint + params = {"args": a[1:], "kwargs": kw} + + log_command_execution(url, f"Failed {err.operation_name}", err.response, params) raise return deco @@ -90,7 +98,7 @@ class Boto3ClientWrapper(S3ClientWrapper): verify=False, ) - def _to_s3_param(self, param: str): + def _to_s3_param(self, param: str) -> str: replacement_map = { "Acl": "ACL", "Cors": "CORS", @@ -101,6 +109,11 @@ class Boto3ClientWrapper(S3ClientWrapper): result = result.replace(find, replace) return result + def _convert_to_s3_params(self, scope: dict, exclude: Optional[list[str]] = None) -> dict: + if not exclude: + exclude = ["self"] + return {self._to_s3_param(param): value for param, value in scope if param not in exclude and value is not None} + # BUCKET METHODS # @reporter.step("Create bucket S3") @report_error @@ -133,7 +146,7 @@ class Boto3ClientWrapper(S3ClientWrapper): params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) s3_bucket = self.boto3_client.create_bucket(**params) - log_command_execution(f"Created S3 bucket {bucket}", s3_bucket) + log_command_execution(self.s3gate_endpoint, 
f"Created S3 bucket {bucket}", s3_bucket, params) return bucket @reporter.step("List buckets S3") @@ -142,7 +155,7 @@ class Boto3ClientWrapper(S3ClientWrapper): found_buckets = [] response = self.boto3_client.list_buckets() - log_command_execution("S3 List buckets result", response) + log_command_execution(self.s3gate_endpoint, "S3 List buckets result", response) for bucket in response["Buckets"]: found_buckets.append(bucket["Name"]) @@ -153,26 +166,27 @@ class Boto3ClientWrapper(S3ClientWrapper): @report_error def delete_bucket(self, bucket: str) -> None: response = self.boto3_client.delete_bucket(Bucket=bucket) - log_command_execution("S3 Delete bucket result", response) + log_command_execution(self.s3gate_endpoint, "S3 Delete bucket result", response, {"Bucket": bucket}) @reporter.step("Head bucket S3") @report_error def head_bucket(self, bucket: str) -> None: response = self.boto3_client.head_bucket(Bucket=bucket) - log_command_execution("S3 Head bucket result", response) + log_command_execution(self.s3gate_endpoint, "S3 Head bucket result", response, {"Bucket": bucket}) @reporter.step("Put bucket versioning status") @report_error def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: - response = self.boto3_client.put_bucket_versioning(Bucket=bucket, VersioningConfiguration={"Status": status.value}) - log_command_execution("S3 Set bucket versioning to", response) + params = {"Bucket": bucket, "VersioningConfiguration": {"Status": status.value}} + response = self.boto3_client.put_bucket_versioning(**params) + log_command_execution(self.s3gate_endpoint, "S3 Set bucket versioning to", response, params) @reporter.step("Get bucket versioning status") @report_error def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: response = self.boto3_client.get_bucket_versioning(Bucket=bucket) status = response.get("Status") - log_command_execution("S3 Got bucket versioning status", response) + log_command_execution(self.s3gate_endpoint, "S3 Got bucket versioning status", response, {"Bucket": bucket}) return status @reporter.step("Put bucket tagging") @@ -180,28 +194,29 @@ class Boto3ClientWrapper(S3ClientWrapper): def put_bucket_tagging(self, bucket: str, tags: list) -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} - response = self.boto3_client.put_bucket_tagging(Bucket=bucket, Tagging=tagging) - log_command_execution("S3 Put bucket tagging", response) + params = self._convert_to_s3_params(locals().items(), exclude=["self", "tags"]) + response = self.boto3_client.put_bucket_tagging(**params) + log_command_execution(self.s3gate_endpoint, "S3 Put bucket tagging", response, params) @reporter.step("Get bucket tagging") @report_error def get_bucket_tagging(self, bucket: str) -> list: response = self.boto3_client.get_bucket_tagging(Bucket=bucket) - log_command_execution("S3 Get bucket tagging", response) + log_command_execution(self.s3gate_endpoint, "S3 Get bucket tagging", response, {"Bucket": bucket}) return response.get("TagSet") @reporter.step("Get bucket acl") @report_error def get_bucket_acl(self, bucket: str) -> list: response = self.boto3_client.get_bucket_acl(Bucket=bucket) - log_command_execution("S3 Get bucket acl", response) + log_command_execution(self.s3gate_endpoint, "S3 Get bucket acl", response, {"Bucket": bucket}) return response.get("Grants") @reporter.step("Delete bucket tagging") @report_error def delete_bucket_tagging(self, bucket: str) -> None: response = 
self.boto3_client.delete_bucket_tagging(Bucket=bucket) - log_command_execution("S3 Delete bucket tagging", response) + log_command_execution(self.s3gate_endpoint, "S3 Delete bucket tagging", response, {"Bucket": bucket}) @reporter.step("Put bucket ACL") @report_error @@ -212,71 +227,74 @@ class Boto3ClientWrapper(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> None: - params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.put_bucket_acl(**params) - log_command_execution("S3 ACL bucket result", response) + log_command_execution(self.s3gate_endpoint, "S3 ACL bucket result", response, params) @reporter.step("Put object lock configuration") @report_error def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: - response = self.boto3_client.put_object_lock_configuration(Bucket=bucket, ObjectLockConfiguration=configuration) - log_command_execution("S3 put_object_lock_configuration result", response) + params = {"Bucket": bucket, "ObjectLockConfiguration": configuration} + response = self.boto3_client.put_object_lock_configuration(**params) + log_command_execution(self.s3gate_endpoint, "S3 put_object_lock_configuration result", response, params) return response @reporter.step("Get object lock configuration") @report_error def get_object_lock_configuration(self, bucket: str) -> dict: response = self.boto3_client.get_object_lock_configuration(Bucket=bucket) - log_command_execution("S3 get_object_lock_configuration result", response) + log_command_execution(self.s3gate_endpoint, "S3 get_object_lock_configuration result", response, {"Bucket": bucket}) return response.get("ObjectLockConfiguration") @reporter.step("Get bucket policy") @report_error def get_bucket_policy(self, bucket: str) -> str: response = self.boto3_client.get_bucket_policy(Bucket=bucket) - log_command_execution("S3 get_bucket_policy result", response) + log_command_execution(self.s3gate_endpoint, "S3 get_bucket_policy result", response, {"Bucket": bucket}) return response.get("Policy") @reporter.step("Delete bucket policy") @report_error def delete_bucket_policy(self, bucket: str) -> str: response = self.boto3_client.delete_bucket_policy(Bucket=bucket) - log_command_execution("S3 delete_bucket_policy result", response) + log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_policy result", response, {"Bucket": bucket}) return response @reporter.step("Put bucket policy") @report_error def put_bucket_policy(self, bucket: str, policy: dict) -> None: - response = self.boto3_client.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy)) - log_command_execution("S3 put_bucket_policy result", response) + params = {"Bucket": bucket, "Policy": json.dumps(policy)} + response = self.boto3_client.put_bucket_policy(**params) + log_command_execution(self.s3gate_endpoint, "S3 put_bucket_policy result", response, params) return response @reporter.step("Get bucket cors") @report_error def get_bucket_cors(self, bucket: str) -> dict: response = self.boto3_client.get_bucket_cors(Bucket=bucket) - log_command_execution("S3 get_bucket_cors result", response) + log_command_execution(self.s3gate_endpoint, "S3 get_bucket_cors result", response, {"Bucket": bucket}) return response.get("CORSRules") @reporter.step("Get bucket location") @report_error def get_bucket_location(self, bucket: str) -> str: response = 
self.boto3_client.get_bucket_location(Bucket=bucket) - log_command_execution("S3 get_bucket_location result", response) + log_command_execution(self.s3gate_endpoint, "S3 get_bucket_location result", response, {"Bucket": bucket}) return response.get("LocationConstraint") @reporter.step("Put bucket cors") @report_error def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: - response = self.boto3_client.put_bucket_cors(Bucket=bucket, CORSConfiguration=cors_configuration) - log_command_execution("S3 put_bucket_cors result", response) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_client.put_bucket_cors(**params) + log_command_execution(self.s3gate_endpoint, "S3 put_bucket_cors result", response, params) return response @reporter.step("Delete bucket cors") @report_error def delete_bucket_cors(self, bucket: str) -> None: response = self.boto3_client.delete_bucket_cors(Bucket=bucket) - log_command_execution("S3 delete_bucket_cors result", response) + log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_cors result", response, {"Bucket": bucket}) # END OF BUCKET METHODS # # OBJECT METHODS # @@ -284,8 +302,9 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("List objects S3 v2") @report_error def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.list_objects_v2(Bucket=bucket) - log_command_execution("S3 v2 List objects result", response) + log_command_execution(self.s3gate_endpoint, "S3 v2 List objects result", response, params) obj_list = [obj["Key"] for obj in response.get("Contents", [])] logger.info(f"Found s3 objects: {obj_list}") @@ -295,8 +314,9 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("List objects S3") @report_error def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.list_objects(Bucket=bucket) - log_command_execution("S3 List objects result", response) + log_command_execution(self.s3gate_endpoint, "S3 List objects result", response, params) obj_list = [obj["Key"] for obj in response.get("Contents", [])] logger.info(f"Found s3 objects: {obj_list}") @@ -306,15 +326,17 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("List objects versions S3") @report_error def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.list_object_versions(Bucket=bucket) - log_command_execution("S3 List objects versions result", response) + log_command_execution(self.s3gate_endpoint, "S3 List objects versions result", response, params) return response if full_output else response.get("Versions", []) @reporter.step("List objects delete markers S3") @report_error def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.list_object_versions(Bucket=bucket) - log_command_execution("S3 List objects delete markers result", response) + log_command_execution(self.s3gate_endpoint, "S3 List objects delete markers result", response, params) return response if full_output else response.get("DeleteMarkers", []) @reporter.step("Put object S3") @@ -339,36 +361,33 @@ class Boto3ClientWrapper(S3ClientWrapper): with open(filepath, "rb") as put_file: body = put_file.read() - 
params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self", "filepath", "put_file"] and value is not None - } - response = self.boto3_client.put_object(**params) - log_command_execution("S3 Put object result", response) + params = self._convert_to_s3_params(locals().items(), exclude=["self", "filepath", "put_file", "body"]) + response = self.boto3_client.put_object(Body=body, **params) + log_command_execution(self.s3gate_endpoint, "S3 Put object result", response, params) return response.get("VersionId") @reporter.step("Head object S3") @report_error def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.head_object(**params) - log_command_execution("S3 Head object result", response) + log_command_execution(self.s3gate_endpoint, "S3 Head object result", response, params) return response @reporter.step("Delete object S3") @report_error def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.delete_object(**params) - log_command_execution("S3 Delete object result", response) + log_command_execution(self.s3gate_endpoint, "S3 Delete object result", response, params) return response @reporter.step("Delete objects S3") @report_error def delete_objects(self, bucket: str, keys: list[str]) -> dict: - response = self.boto3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(keys)) - log_command_execution("S3 Delete objects result", response) + params = {"Bucket": bucket, "Delete": _make_objs_dict(keys)} + response = self.boto3_client.delete_objects(**params) + log_command_execution(self.s3gate_endpoint, "S3 Delete objects result", response, params) assert ( "Errors" not in response ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' @@ -387,8 +406,9 @@ class Boto3ClientWrapper(S3ClientWrapper): for object_version in object_versions ] } - response = self.boto3_client.delete_objects(Bucket=bucket, Delete=delete_list) - log_command_execution("S3 Delete objects result", response) + params = {"Bucket": bucket, "Delete": delete_list} + response = self.boto3_client.delete_objects(**params) + log_command_execution(self.s3gate_endpoint, "S3 Delete objects result", response, params) return response @reporter.step("Delete object versions S3 without delete markers") @@ -396,8 +416,9 @@ class Boto3ClientWrapper(S3ClientWrapper): def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: # Delete objects without creating delete markers for object_version in object_versions: - response = self.boto3_client.delete_object(Bucket=bucket, Key=object_version["Key"], VersionId=object_version["VersionId"]) - log_command_execution("S3 Delete object result", response) + params = {"Bucket": bucket, "Key": object_version["Key"], "VersionId": object_version["VersionId"]} + response = self.boto3_client.delete_object(**params) + log_command_execution(self.s3gate_endpoint, "S3 Delete object result", response, params) @reporter.step("Put object ACL") 
@report_error @@ -409,17 +430,17 @@ class Boto3ClientWrapper(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> list: - params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.put_object_acl(**params) - log_command_execution("S3 put object ACL", response) + log_command_execution(self.s3gate_endpoint, "S3 put object ACL", response, params) return response.get("Grants") @reporter.step("Get object ACL") @report_error def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.get_object_acl(**params) - log_command_execution("S3 ACL objects result", response) + log_command_execution(self.s3gate_endpoint, "S3 ACL objects result", response, params) return response.get("Grants") @reporter.step("Copy object S3") @@ -442,13 +463,9 @@ class Boto3ClientWrapper(S3ClientWrapper): key = string_utils.unique_name("copy-object-") copy_source = f"{source_bucket}/{source_key}" - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self", "source_bucket", "source_key"] and value is not None - } + params = self._convert_to_s3_params(locals().items(), exclude=["self", "source_bucket", "source_key"]) response = self.boto3_client.copy_object(**params) - log_command_execution("S3 Copy objects result", response) + log_command_execution(self.s3gate_endpoint, "S3 Copy objects result", response, params) return key @reporter.step("Get object S3") @@ -465,13 +482,12 @@ class Boto3ClientWrapper(S3ClientWrapper): if object_range: range_str = f"bytes={object_range[0]}-{object_range[1]}" - params = { - self._to_s3_param(param): value - for param, value in {**locals(), **{"Range": range_str}}.items() - if param not in ["self", "object_range", "full_output", "range_str", "filename"] and value is not None - } + params = self._convert_to_s3_params( + {**locals(), **{"Range": range_str}}.items(), + exclude=["self", "object_range", "full_output", "range_str"], + ) response = self.boto3_client.get_object(**params) - log_command_execution("S3 Get objects result", response) + log_command_execution(self.s3gate_endpoint, "S3 Get objects result", response, params) if full_output: return response @@ -487,8 +503,9 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Create multipart upload S3") @report_error def create_multipart_upload(self, bucket: str, key: str) -> str: - response = self.boto3_client.create_multipart_upload(Bucket=bucket, Key=key) - log_command_execution("S3 Created multipart upload", response) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_client.create_multipart_upload(**params) + log_command_execution(self.s3gate_endpoint, "S3 Created multipart upload", response, params) assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" return response["UploadId"] @@ -497,15 +514,16 @@ class Boto3ClientWrapper(S3ClientWrapper): @report_error def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: response = self.boto3_client.list_multipart_uploads(Bucket=bucket) - log_command_execution("S3 List multipart upload", response) + 
log_command_execution(self.s3gate_endpoint, "S3 List multipart upload", response, {"Bucket": bucket}) return response.get("Uploads") @reporter.step("Abort multipart upload S3") @report_error def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: - response = self.boto3_client.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id) - log_command_execution("S3 Abort multipart upload", response) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_client.abort_multipart_upload(**params) + log_command_execution(self.s3gate_endpoint, "S3 Abort multipart upload", response, params) @reporter.step("Upload part S3") @report_error @@ -513,14 +531,10 @@ class Boto3ClientWrapper(S3ClientWrapper): with open(filepath, "rb") as put_file: body = put_file.read() - response = self.boto3_client.upload_part( - UploadId=upload_id, - Bucket=bucket, - Key=key, - PartNumber=part_num, - Body=body, - ) - log_command_execution("S3 Upload part", response) + params = self._convert_to_s3_params(locals().items(), exclude=["self", "put_file", "part_num", "filepath", "body"]) + params["PartNumber"] = part_num + response = self.boto3_client.upload_part(Body=body, **params) + log_command_execution(self.s3gate_endpoint, "S3 Upload part", response, params) assert response.get("ETag"), f"Expected ETag in response:\n{response}" return response["ETag"] @@ -528,14 +542,10 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Upload copy part S3") @report_error def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: - response = self.boto3_client.upload_part_copy( - UploadId=upload_id, - Bucket=bucket, - Key=key, - PartNumber=part_num, - CopySource=copy_source, - ) - log_command_execution("S3 Upload copy part", response) + params = self._convert_to_s3_params(locals().items(), exclude=["self", "put_file", "part_num", "filepath"]) + params["PartNumber"] = part_num + response = self.boto3_client.upload_part_copy(**params) + log_command_execution(self.s3gate_endpoint, "S3 Upload copy part", response, params) assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" return response["CopyPartResult"]["ETag"] @@ -543,8 +553,9 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("List parts S3") @report_error def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: - response = self.boto3_client.list_parts(UploadId=upload_id, Bucket=bucket, Key=key) - log_command_execution("S3 List part", response) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_client.list_parts(**params) + log_command_execution(self.s3gate_endpoint, "S3 List part", response, params) assert response.get("Parts"), f"Expected Parts in response:\n{response}" return response["Parts"] @@ -553,8 +564,10 @@ class Boto3ClientWrapper(S3ClientWrapper): @report_error def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] - response = self.boto3_client.complete_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id, MultipartUpload={"Parts": parts}) - log_command_execution("S3 Complete multipart upload", response) + params = self._convert_to_s3_params(locals().items(), exclude=["self", "parts"]) + params["MultipartUpload"] = {"Parts": parts} + response = self.boto3_client.complete_multipart_upload(**params) + 
log_command_execution(self.s3gate_endpoint, "S3 Complete multipart upload", response, params) return response @@ -568,9 +581,9 @@ class Boto3ClientWrapper(S3ClientWrapper): version_id: Optional[str] = None, bypass_governance_retention: Optional[bool] = None, ) -> None: - params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.put_object_retention(**params) - log_command_execution("S3 Put object retention ", response) + log_command_execution(self.s3gate_endpoint, "S3 Put object retention ", response, params) @reporter.step("Put object legal hold") @report_error @@ -582,35 +595,33 @@ class Boto3ClientWrapper(S3ClientWrapper): version_id: Optional[str] = None, ) -> None: legal_hold = {"Status": legal_hold_status} - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self", "legal_hold_status"] and value is not None - } + params = self._convert_to_s3_params(locals().items(), exclude=["self", "legal_hold_status"]) response = self.boto3_client.put_object_legal_hold(**params) - log_command_execution("S3 Put object legal hold ", response) + log_command_execution(self.s3gate_endpoint, "S3 Put object legal hold ", response, params) @reporter.step("Put object tagging") @report_error def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} - response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging, VersionId=version_id) - log_command_execution("S3 Put object tagging", response) + params = self._convert_to_s3_params(locals().items(), exclude=["self", "tags"]) + response = self.boto3_client.put_object_tagging(**params) + log_command_execution(self.s3gate_endpoint, "S3 Put object tagging", response, params) @reporter.step("Get object tagging") @report_error def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.get_object_tagging(**params) - log_command_execution("S3 Get object tagging", response) + log_command_execution(self.s3gate_endpoint, "S3 Get object tagging", response, params) return response.get("TagSet") @reporter.step("Delete object tagging") @report_error def delete_object_tagging(self, bucket: str, key: str) -> None: - response = self.boto3_client.delete_object_tagging(Bucket=bucket, Key=key) - log_command_execution("S3 Delete object tagging", response) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_client.delete_object_tagging(**params) + log_command_execution(self.s3gate_endpoint, "S3 Delete object tagging", response, params) @reporter.step("Get object attributes") @report_error diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 0a1b5fd..d22f5c1 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -15,7 +15,7 @@ from contextlib import suppress from datetime import datetime from io import StringIO from textwrap import shorten -from typing import Dict, List, TypedDict, Union +from typing import Dict, List, Optional, TypedDict, Union 
import pexpect @@ -75,12 +75,21 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date reporter.attach(command_attachment, "Command execution") -def log_command_execution(cmd: str, output: Union[str, TypedDict]) -> None: +def log_command_execution(url: str, cmd: str, output: Union[str, TypedDict], params: Optional[dict] = None) -> None: logger.info(f"{cmd}: {output}") + with suppress(Exception): json_output = json.dumps(output, indent=4, sort_keys=True) output = json_output - command_attachment = f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n" + + try: + json_params = json.dumps(params, indent=4, sort_keys=True) + except TypeError as err: + logger.warning(f"Failed to serialize '{cmd}' request parameters:\n{params}\nException: {err}") + else: + params = json_params + + command_attachment = f"COMMAND: '{cmd}'\n" f"URL: {url}\n" f"PARAMS:\n{params}\n" f"OUTPUT:\n{output}\n" with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): reporter.attach(command_attachment, "Command execution") From 6f99aef4065a8ab045cc7ea483252bbd63284954 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Tue, 16 Jul 2024 13:36:02 +0300 Subject: [PATCH 174/274] [#263] Unify version parsing The function `_parse_version` was renamed to `parse_version`, and the regex used for version parsing was changed. Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/utils/version_utils.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index 7fcc9de..490abb0 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -18,14 +18,14 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]: for binary in [NEOGO_EXECUTABLE, FROSTFS_AUTHMATE_EXEC]: out = shell.exec(f"{binary} --version").stdout - versions[binary] = _parse_version(out) + versions[binary] = parse_version(out) frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC) - versions[FROSTFS_CLI_EXEC] = _parse_version(frostfs_cli.version.get().stdout) + versions[FROSTFS_CLI_EXEC] = parse_version(frostfs_cli.version.get().stdout) try: frostfs_adm = FrostfsAdm(shell, FROSTFS_ADM_EXEC) - versions[FROSTFS_ADM_EXEC] = _parse_version(frostfs_adm.version.get().stdout) + versions[FROSTFS_ADM_EXEC] = parse_version(frostfs_adm.version.get().stdout) except RuntimeError: logger.info(f"{FROSTFS_ADM_EXEC} not installed") @@ -63,7 +63,7 @@ def parallel_binary_verions(host: Host) -> dict[str, str]: binary_path = binary["exec_path"] try: result = shell.exec(f"{binary_path} {binary['param']}") - version = _parse_version(result.stdout) or _parse_version(result.stderr) or "Unknown" + version = parse_version(result.stdout) or parse_version(result.stderr) or "Unknown" versions_at_host[binary_name] = version except Exception as exc: logger.error(f"Cannot get version for {binary_path} because of\n{exc}") @@ -85,6 +85,6 @@ def get_remote_binaries_versions(hosting: Hosting) -> dict[str, dict[str, str]]: return versions_by_host -def _parse_version(version_output: str) -> str: - version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE) - return version.group(1).strip() if version else version_output +def parse_version(version_output: str) -> str: + version = re.search(r"(?<=version[:=])\s?[\"\']?v?(.+)", version_output, re.IGNORECASE) + return version.group(1).strip("\"'\n\t ") if version else version_output From b6a657e76c03818bf0f663284a6c5036ab713687 Mon Sep 17 00:00:00 2001 From: 
Ilyas Niyazov Date: Tue, 9 Jul 2024 14:47:32 +0300 Subject: [PATCH 175/274] [#258] add tests for preupgrade --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 2 +- .../storage/controllers/cluster_state_controller.py | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index 1d753d9..d8fd61c 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -110,7 +110,7 @@ class FrostfsAdmMorph(CliCommand): **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) - def dump_hashes(self, rpc_endpoint: str) -> CommandResult: + def dump_hashes(self, rpc_endpoint: str, domain: Optional[str] = None) -> CommandResult: """Dump deployed contract hashes. Args: diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 3c6c268..cec5ed3 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -531,3 +531,11 @@ class ClusterStateController: except Exception as err: logger.warning(f"Host ping fails with error {err}") return HostStatus.ONLINE + + @reporter.step("Get contract by domain - {domain_name}") + def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str): + frostfs_adm = FrostfsAdm( + shell=cluster_node.host.get_shell(), + frostfs_adm_exec_path=FROSTFS_ADM_EXEC, + ) + return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_endpoint(), domain_name).stdout From f472d7e1ce93c1e1fecbd612150cf650c5a95123 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Tue, 16 Jul 2024 13:00:50 +0300 Subject: [PATCH 176/274] [#261] Add error pattern no rule --- src/frostfs_testlib/resources/error_patterns.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 5491a7a..3b9231e 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -28,3 +28,4 @@ S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied" RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied" +NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound" From 40dfd015a854d1d34cd00acdac2a6cc12b1cd8a0 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Thu, 18 Jul 2024 00:00:48 +0300 Subject: [PATCH 177/274] [#264] Add APE related commands Signed-off-by: a.berezin --- .../cli/frostfs_cli/ape_manager.py | 70 +++++++++++ src/frostfs_testlib/cli/frostfs_cli/bearer.py | 54 ++++++++ src/frostfs_testlib/cli/frostfs_cli/cli.py | 4 + src/frostfs_testlib/cli/frostfs_cli/util.py | 8 ++ .../storage/dataclasses/ape.py | 115 ++++++++++++++++++ .../testing/cluster_test_base.py | 5 +- src/frostfs_testlib/utils/string_utils.py | 4 +- 7 files changed, 257 insertions(+), 3 deletions(-) create mode 100644 src/frostfs_testlib/cli/frostfs_cli/ape_manager.py create mode 100644 src/frostfs_testlib/cli/frostfs_cli/bearer.py create mode 100644 src/frostfs_testlib/storage/dataclasses/ape.py diff --git 
a/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py b/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py new file mode 100644 index 0000000..525a9be --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py @@ -0,0 +1,70 @@ +from typing import Optional + +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult + + +class FrostfsCliApeManager(CliCommand): + """Operations with APE manager.""" + + def add( + self, + rpc_endpoint: str, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + path: Optional[str] = None, + rule: Optional[str] | Optional[list[str]] = None, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Add rule chain for a target.""" + + return self._execute( + "ape-manager add", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list( + self, + rpc_endpoint: str, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """List rule chains for a target. + + Lists the rule chains attached to the target, which is specified + by its name and type. + """ + + return self._execute( + "ape-manager list", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def remove( + self, + rpc_endpoint: str, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Remove rule chain for a target. + + Removes the rule chain with the given chain ID from the + specified target. + """ + + return self._execute( + "ape-manager remove", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/bearer.py b/src/frostfs_testlib/cli/frostfs_cli/bearer.py new file mode 100644 index 0000000..e21a6c8 --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_cli/bearer.py @@ -0,0 +1,54 @@ +from typing import Optional + +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult + + +class FrostfsCliBearer(CliCommand): + def create( + self, + rpc_endpoint: str, + out: str, + issued_at: Optional[str] = None, + expire_at: Optional[str] = None, + not_valid_before: Optional[str] = None, + ape: Optional[str] = None, + eacl: Optional[str] = None, + owner: Optional[str] = None, + json: Optional[bool] = False, + impersonate: Optional[bool] = False, + wallet: Optional[str] = None, + address: Optional[str] = None, + ) -> CommandResult: + """Create bearer token. + + All epoch flags can be specified relative to the current epoch with the +n syntax. + In this case --rpc-endpoint flag should be specified and the epoch in bearer token + is set to current epoch + n.
+ """ + return self._execute( + "bearer create", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def generate_ape_override( + self, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + cid: Optional[str] = None, + output: Optional[str] = None, + path: Optional[str] = None, + rule: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + ) -> CommandResult: + """Generate APE override by target and APE chains. Util command. + + Generated APE override can be dumped to a file in JSON format that is passed to + "create" command. + """ + + return self._execute( + "bearer generate-ape-override", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/cli.py b/src/frostfs_testlib/cli/frostfs_cli/cli.py index c20a987..d83b7ae 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/cli.py +++ b/src/frostfs_testlib/cli/frostfs_cli/cli.py @@ -2,6 +2,8 @@ from typing import Optional from frostfs_testlib.cli.frostfs_cli.accounting import FrostfsCliAccounting from frostfs_testlib.cli.frostfs_cli.acl import FrostfsCliACL +from frostfs_testlib.cli.frostfs_cli.ape_manager import FrostfsCliApeManager +from frostfs_testlib.cli.frostfs_cli.bearer import FrostfsCliBearer from frostfs_testlib.cli.frostfs_cli.container import FrostfsCliContainer from frostfs_testlib.cli.frostfs_cli.control import FrostfsCliControl from frostfs_testlib.cli.frostfs_cli.netmap import FrostfsCliNetmap @@ -41,3 +43,5 @@ class FrostfsCli: self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file) self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file) self.control = FrostfsCliControl(shell, frostfs_cli_exec_path, config=config_file) + self.bearer = FrostfsCliBearer(shell, frostfs_cli_exec_path, config=config_file) + self.ape_manager = FrostfsCliApeManager(shell, frostfs_cli_exec_path, config=config_file) diff --git a/src/frostfs_testlib/cli/frostfs_cli/util.py b/src/frostfs_testlib/cli/frostfs_cli/util.py index 7914169..37347a5 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/util.py +++ b/src/frostfs_testlib/cli/frostfs_cli/util.py @@ -54,3 +54,11 @@ class FrostfsCliUtil(CliCommand): "util sign session-token", **{param: value for param, value in locals().items() if param not in ["self"]}, ) + + def convert_eacl(self, from_file: str, to_file: str, json: Optional[bool] = False, ape: Optional[bool] = False): + """Convert representation of extended ACL table.""" + + return self._execute( + "util convert eacl", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py new file mode 100644 index 0000000..84b3033 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -0,0 +1,115 @@ +import logging +from dataclasses import dataclass +from enum import Enum +from typing import Optional + +from frostfs_testlib.testing.readable import HumanReadableEnum +from frostfs_testlib.utils import string_utils + +logger = logging.getLogger("NeoLogger") +EACL_LIFETIME = 100500 +FROSTFS_CONTRACT_CACHE_TIMEOUT = 30 + + +class ObjectOperations(HumanReadableEnum): + PUT = "object.put" + GET = "object.get" + HEAD = "object.head" + GET_RANGE = "object.range" + GET_RANGE_HASH = "object.hash" + SEARCH = "object.search" + DELETE = "object.delete" + WILDCARD_ALL = "object.*" + + @staticmethod + def get_all(): + 
return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] + + +class Verb(HumanReadableEnum): + ALLOW = "allow" + DENY = "deny" + + +class Role(HumanReadableEnum): + OWNER = "owner" + IR = "ir" + CONTAINER = "container" + OTHERS = "others" + + +class ConditionType(HumanReadableEnum): + RESOURCE = "ResourceCondition" + REQUEST = "RequestCondition" + + +# See https://git.frostfs.info/TrueCloudLab/policy-engine/src/branch/master/schema/native/consts.go#L40-L53 +class ConditionKey(HumanReadableEnum): + ROLE = '"\\$Actor:role"' + PUBLIC_KEY = '"\\$Actor:publicKey"' + + +class MatchType(HumanReadableEnum): + EQUAL = "=" + NOT_EQUAL = "!=" + + +@dataclass +class Condition: + condition_key: ConditionKey | str + condition_value: str + condition_type: ConditionType = ConditionType.REQUEST + match_type: MatchType = MatchType.EQUAL + + def as_string(self): + key = self.condition_key.value if isinstance(self.condition_key, ConditionKey) else self.condition_key + value = self.condition_value.value if isinstance(self.condition_value, Enum) else self.condition_value + + return f"{self.condition_type.value}:{key}{self.match_type.value}{value}" + + @staticmethod + def by_role(*args, **kwargs) -> "Condition": + return Condition(ConditionKey.ROLE, *args, **kwargs) + + @staticmethod + def by_key(*args, **kwargs) -> "Condition": + return Condition(ConditionKey.PUBLIC_KEY, *args, **kwargs) + + +class Rule: + def __init__( + self, + access: Verb, + operations: list[ObjectOperations] | ObjectOperations, + conditions: list[Condition] | Condition = None, + chain_id: Optional[str] = None, + ) -> None: + self.access = access + self.operations = operations + + if not conditions: + self.conditions = [] + elif isinstance(conditions, Condition): + self.conditions = [conditions] + else: + self.conditions = conditions + + if not isinstance(self.conditions, list): + raise RuntimeError("Conditions must be a list") + + if not operations: + self.operations = [] + elif isinstance(operations, ObjectOperations): + self.operations = [operations] + else: + self.operations = operations + + if not isinstance(self.operations, list): + raise RuntimeError("Operations must be a list") + + self.chain_id = chain_id if chain_id else string_utils.unique_name("chain-id-") + + def as_string(self): + conditions = " ".join([cond.as_string() for cond in self.conditions]) + operations = " ".join([op.value for op in self.operations]) + return f"{self.access.value} {operations} {conditions} *" diff --git a/src/frostfs_testlib/testing/cluster_test_base.py b/src/frostfs_testlib/testing/cluster_test_base.py index 49c6afd..f2e10ad 100644 --- a/src/frostfs_testlib/testing/cluster_test_base.py +++ b/src/frostfs_testlib/testing/cluster_test_base.py @@ -32,7 +32,7 @@ class ClusterTestBase: ): epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node) if wait_block: - time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * wait_block) + self.wait_for_blocks(wait_block) def wait_for_epochs_align(self): epoch.wait_for_epochs_align(self.shell, self.cluster) @@ -42,3 +42,6 @@ class ClusterTestBase: def ensure_fresh_epoch(self): return epoch.ensure_fresh_epoch(self.shell, self.cluster) + + def wait_for_blocks(self, blocks_count: int = 1): + time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * blocks_count) diff --git a/src/frostfs_testlib/utils/string_utils.py b/src/frostfs_testlib/utils/string_utils.py index d8e91a4..80efa65 100644 --- a/src/frostfs_testlib/utils/string_utils.py +++ b/src/frostfs_testlib/utils/string_utils.py @@ 
-8,7 +8,7 @@ DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits NON_DIGITS_AND_LETTERS = string.punctuation -def unique_name(prefix: str = ""): +def unique_name(prefix: str = "", postfix: str = ""): """ Generate unique short name of anything with prefix. This should be unique in scope of multiple runs @@ -18,7 +18,7 @@ def unique_name(prefix: str = ""): Returns: unique name string """ - return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}" + return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}{postfix}" def random_string(length: int = 5, source: str = ONLY_ASCII_LETTERS): From 4c0d76408cf7eade7fc5ac1a460b12c2335bdb5c Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Thu, 18 Jul 2024 18:21:46 +0300 Subject: [PATCH 178/274] [#265] Update codeowners Signed-off-by: a.berezin --- CODEOWNERS | 1 + 1 file changed, 1 insertion(+) create mode 100644 CODEOWNERS diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000..4a621d3 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1 @@ +* @JuliaKovshova @abereziny @d.zayakin @anikeev-yadro @anurindm @ylukoyan @i.niyazov From 166e44da9c3bcaf22ce1896ff829bbbc9819614a Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Thu, 18 Jul 2024 19:48:38 +0300 Subject: [PATCH 179/274] [#266] Remove duplicate messages in logs Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/shell/local_shell.py | 49 ++++++++++------------ src/frostfs_testlib/steps/cli/container.py | 1 - src/frostfs_testlib/utils/cli_utils.py | 5 +-- 3 files changed, 23 insertions(+), 32 deletions(-) diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index acf01ff..2fb6631 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -28,10 +28,10 @@ class LocalShell(Shell): for inspector in [*self.command_inspectors, *extra_inspectors]: command = inspector.inspect(original_command, command) - logger.info(f"Executing command: {command}") - if options.interactive_inputs: - return self._exec_interactive(command, options) - return self._exec_non_interactive(command, options) + with reporter.step(f"Executing command: {command}"): + if options.interactive_inputs: + return self._exec_interactive(command, options) + return self._exec_non_interactive(command, options) def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: start_time = datetime.utcnow() @@ -60,9 +60,7 @@ class LocalShell(Shell): if options.check and result.return_code != 0: raise RuntimeError( - f"Command: {command}\nreturn code: {result.return_code}\n" - f"Output: {result.stdout}\n" - f"Stderr: {result.stderr}\n" + f"Command: {command}\nreturn code: {result.return_code}\n" f"Output: {result.stdout}\n" f"Stderr: {result.stderr}\n" ) return result @@ -93,9 +91,7 @@ class LocalShell(Shell): stderr="", return_code=exc.returncode, ) - raise RuntimeError( - f"Command: {command}\nError:\n" f"return code: {exc.returncode}\n" f"output: {exc.output}" - ) from exc + raise RuntimeError(f"Command: {command}\nError with retcode: {exc.returncode}\n Output: {exc.output}") from exc except OSError as exc: raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc finally: @@ -129,22 +125,19 @@ class LocalShell(Shell): end_time: datetime, result: Optional[CommandResult], ) -> None: - # TODO: increase logging level if return code is non 0, should be warning at least - logger.info( - f"Command: {command}\n" - f"{'Success:' if result and result.return_code == 0 else 'Error:'}\n" 
- f"return code: {result.return_code if result else ''} " - f"\nOutput: {result.stdout if result else ''}" - ) + if not result: + logger.warning(f"Command: {command}\n" f"Error: result is None") + return - if result: - elapsed_time = end_time - start_time - command_attachment = ( - f"COMMAND: {command}\n" - f"RETCODE: {result.return_code}\n\n" - f"STDOUT:\n{result.stdout}\n" - f"STDERR:\n{result.stderr}\n" - f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" - ) - with reporter.step(f"COMMAND: {command}"): - reporter.attach(command_attachment, "Command execution.txt") + status, log_method = ("Success", logger.info) if result.return_code == 0 else ("Error", logger.warning) + log_method(f"Command: {command}\n" f"{status} with retcode {result.return_code}\n" f"Output: \n{result.stdout}") + + elapsed_time = end_time - start_time + command_attachment = ( + f"COMMAND: {command}\n" + f"RETCODE: {result.return_code}\n\n" + f"STDOUT:\n{result.stdout}\n" + f"STDERR:\n{result.stderr}\n" + f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" + ) + reporter.attach(command_attachment, "Command execution.txt") diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index fa739a8..641b321 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -200,7 +200,6 @@ def list_containers(wallet: WalletInfo, shell: Shell, endpoint: str, timeout: Op """ cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) result = cli.container.list(rpc_endpoint=endpoint, timeout=timeout) - logger.info(f"Containers: \n{result}") return result.stdout.split() diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index d22f5c1..8e019ea 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -75,7 +75,7 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date reporter.attach(command_attachment, "Command execution") -def log_command_execution(url: str, cmd: str, output: Union[str, TypedDict], params: Optional[dict] = None) -> None: +def log_command_execution(url: str, cmd: str, output: Union[str, dict], params: Optional[dict] = None) -> None: logger.info(f"{cmd}: {output}") with suppress(Exception): @@ -90,8 +90,7 @@ def log_command_execution(url: str, cmd: str, output: Union[str, TypedDict], par params = json_params command_attachment = f"COMMAND: '{cmd}'\n" f"URL: {url}\n" f"PARAMS:\n{params}\n" f"OUTPUT:\n{output}\n" - with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): - reporter.attach(command_attachment, "Command execution") + reporter.attach(command_attachment, "Command execution") def parse_netmap_output(output: str) -> list[NodeNetmapInfo]: From 7a500330de02c6167485de9a48e9c833f812b8ce Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Fri, 26 Jul 2024 16:34:47 +0300 Subject: [PATCH 180/274] [#270] Updates related to testing platform Signed-off-by: a.berezin --- .devenv.hosting.yaml | 108 ++++++++++++++++++ pyproject.toml | 5 +- src/frostfs_testlib/__init__.py | 2 + src/frostfs_testlib/analytics/__init__.py | 2 +- .../analytics/test_exporter.py | 11 +- .../analytics/testrail_exporter.py | 34 ++---- src/frostfs_testlib/fixtures.py | 35 ++++++ src/frostfs_testlib/resources/common.py | 5 + 8 files changed, 168 insertions(+), 34 deletions(-) create mode 100644 .devenv.hosting.yaml create mode 100644 src/frostfs_testlib/fixtures.py 
diff --git a/.devenv.hosting.yaml b/.devenv.hosting.yaml new file mode 100644 index 0000000..d096625 --- /dev/null +++ b/.devenv.hosting.yaml @@ -0,0 +1,108 @@ +hosts: +- address: localhost + attributes: + sudo_shell: false + plugin_name: docker + healthcheck_plugin_name: basic + attributes: + skip_readiness_check: True + force_transactions: True + services: + - name: frostfs-storage_01 + attributes: + container_name: s01 + config_path: /etc/frostfs/storage/config.yml + wallet_path: ../frostfs-dev-env/services/storage/wallet01.json + local_wallet_config_path: ./TemporaryDir/empty-password.yml + local_wallet_path: ../frostfs-dev-env/services/storage/wallet01.json + wallet_password: "" + volume_name: storage_storage_s01 + endpoint_data0: s01.frostfs.devenv:8080 + control_endpoint: s01.frostfs.devenv:8081 + un_locode: "RU MOW" + - name: frostfs-storage_02 + attributes: + container_name: s02 + config_path: /etc/frostfs/storage/config.yml + wallet_path: ../frostfs-dev-env/services/storage/wallet02.json + local_wallet_config_path: ./TemporaryDir/empty-password.yml + local_wallet_path: ../frostfs-dev-env/services/storage/wallet02.json + wallet_password: "" + volume_name: storage_storage_s02 + endpoint_data0: s02.frostfs.devenv:8080 + control_endpoint: s02.frostfs.devenv:8081 + un_locode: "RU LED" + - name: frostfs-storage_03 + attributes: + container_name: s03 + config_path: /etc/frostfs/storage/config.yml + wallet_path: ../frostfs-dev-env/services/storage/wallet03.json + local_wallet_config_path: ./TemporaryDir/empty-password.yml + local_wallet_path: ../frostfs-dev-env/services/storage/wallet03.json + wallet_password: "" + volume_name: storage_storage_s03 + endpoint_data0: s03.frostfs.devenv:8080 + control_endpoint: s03.frostfs.devenv:8081 + un_locode: "SE STO" + - name: frostfs-storage_04 + attributes: + container_name: s04 + config_path: /etc/frostfs/storage/config.yml + wallet_path: ../frostfs-dev-env/services/storage/wallet04.json + local_wallet_config_path: ./TemporaryDir/empty-password.yml + local_wallet_path: ../frostfs-dev-env/services/storage/wallet04.json + wallet_password: "" + volume_name: storage_storage_s04 + endpoint_data0: s04.frostfs.devenv:8080 + control_endpoint: s04.frostfs.devenv:8081 + un_locode: "FI HEL" + - name: frostfs-s3_01 + attributes: + container_name: s3_gate + config_path: ../frostfs-dev-env/services/s3_gate/.s3.env + wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json + local_wallet_config_path: ./TemporaryDir/password-s3.yml + local_wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json + wallet_password: "s3" + endpoint_data0: https://s3.frostfs.devenv:8080 + - name: frostfs-http_01 + attributes: + container_name: http_gate + config_path: ../frostfs-dev-env/services/http_gate/.http.env + wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json + local_wallet_config_path: ./TemporaryDir/password-other.yml + local_wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json + wallet_password: "one" + endpoint_data0: http://http.frostfs.devenv + - name: frostfs-ir_01 + attributes: + container_name: ir01 + config_path: ../frostfs-dev-env/services/ir/.ir.env + wallet_path: ../frostfs-dev-env/services/ir/az.json + local_wallet_config_path: ./TemporaryDir/password-other.yml + local_wallet_path: ../frostfs-dev-env/services/ir/az.json + wallet_password: "one" + - name: neo-go_01 + attributes: + container_name: morph_chain + config_path: ../frostfs-dev-env/services/morph_chain/protocol.privnet.yml + wallet_path: 
../frostfs-dev-env/services/morph_chain/node-wallet.json + local_wallet_config_path: ./TemporaryDir/password-other.yml + local_wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json + wallet_password: "one" + endpoint_internal0: http://morph-chain.frostfs.devenv:30333 + - name: main-chain_01 + attributes: + container_name: main_chain + config_path: ../frostfs-dev-env/services/chain/protocol.privnet.yml + wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json + local_wallet_config_path: ./TemporaryDir/password-other.yml + local_wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json + wallet_password: "one" + endpoint_internal0: http://main-chain.frostfs.devenv:30333 + - name: coredns_01 + attributes: + container_name: coredns + clis: + - name: frostfs-cli + exec_path: frostfs-cli diff --git a/pyproject.toml b/pyproject.toml index 5a38dba..296ce65 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,4 +89,7 @@ push = false filterwarnings = [ "ignore:Blowfish has been deprecated:cryptography.utils.CryptographyDeprecationWarning", ] -testpaths = ["tests"] \ No newline at end of file +testpaths = ["tests"] + +[project.entry-points.pytest11] +testlib = "frostfs_testlib" \ No newline at end of file diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index 159d48b..2cdaf4e 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1 +1,3 @@ __version__ = "2.0.1" + +from .fixtures import configure_testlib, hosting diff --git a/src/frostfs_testlib/analytics/__init__.py b/src/frostfs_testlib/analytics/__init__.py index 6995a08..b057418 100644 --- a/src/frostfs_testlib/analytics/__init__.py +++ b/src/frostfs_testlib/analytics/__init__.py @@ -1,5 +1,5 @@ from frostfs_testlib.analytics import test_case from frostfs_testlib.analytics.test_case import TestCasePriority from frostfs_testlib.analytics.test_collector import TestCase, TestCaseCollector -from frostfs_testlib.analytics.test_exporter import TestExporter +from frostfs_testlib.analytics.test_exporter import TСExporter from frostfs_testlib.analytics.testrail_exporter import TestrailExporter diff --git a/src/frostfs_testlib/analytics/test_exporter.py b/src/frostfs_testlib/analytics/test_exporter.py index 5a569c6..dd6a7fb 100644 --- a/src/frostfs_testlib/analytics/test_exporter.py +++ b/src/frostfs_testlib/analytics/test_exporter.py @@ -3,7 +3,8 @@ from abc import ABC, abstractmethod from frostfs_testlib.analytics.test_collector import TestCase -class TestExporter(ABC): +# TODO: REMOVE ME +class TСExporter(ABC): test_cases_cache = [] test_suites_cache = [] @@ -46,9 +47,7 @@ class TestExporter(ABC): """ @abstractmethod - def update_test_case( - self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section - ) -> None: + def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None: """ Update test case in TMS """ @@ -60,9 +59,7 @@ class TestExporter(ABC): for test_case in test_cases: test_suite = self.get_or_create_test_suite(test_case.suite_name) - test_section = self.get_or_create_suite_section( - test_suite, test_case.suite_section_name - ) + test_section = self.get_or_create_suite_section(test_suite, test_case.suite_section_name) test_case_in_tms = self.search_test_case_id(test_case.id) steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()] diff --git a/src/frostfs_testlib/analytics/testrail_exporter.py b/src/frostfs_testlib/analytics/testrail_exporter.py index 
610fee5..36c482c 100644 --- a/src/frostfs_testlib/analytics/testrail_exporter.py +++ b/src/frostfs_testlib/analytics/testrail_exporter.py @@ -1,10 +1,10 @@ from testrail_api import TestRailAPI from frostfs_testlib.analytics.test_collector import TestCase -from frostfs_testlib.analytics.test_exporter import TestExporter +from frostfs_testlib.analytics.test_exporter import TСExporter -class TestrailExporter(TestExporter): +class TestrailExporter(TСExporter): def __init__( self, tr_url: str, @@ -62,19 +62,13 @@ class TestrailExporter(TestExporter): It's help do not call TMS each time then we search test case """ for test_suite in self.test_suites_cache: - self.test_cases_cache.extend( - self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"]) - ) + self.test_cases_cache.extend(self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"])) def search_test_case_id(self, test_case_id: str) -> object: """ Find test cases in TestRail (cache) by ID """ - test_cases = [ - test_case - for test_case in self.test_cases_cache - if test_case["custom_autotest_name"] == test_case_id - ] + test_cases = [test_case for test_case in self.test_cases_cache if test_case["custom_autotest_name"] == test_case_id] if len(test_cases) > 1: raise RuntimeError(f"Too many results found in test rail for id {test_case_id}") @@ -87,9 +81,7 @@ class TestrailExporter(TestExporter): """ Get suite name with exact name from Testrail or create if not exist """ - test_rail_suites = [ - suite for suite in self.test_suites_cache if suite["name"] == test_suite_name - ] + test_rail_suites = [suite for suite in self.test_suites_cache if suite["name"] == test_suite_name] if not test_rail_suites: test_rail_suite = self.api.suites.add_suite( @@ -102,17 +94,13 @@ class TestrailExporter(TestExporter): elif len(test_rail_suites) == 1: return test_rail_suites.pop() else: - raise RuntimeError( - f"Too many results found in test rail for suite name {test_suite_name}" - ) + raise RuntimeError(f"Too many results found in test rail for suite name {test_suite_name}") def get_or_create_suite_section(self, test_rail_suite, section_name) -> object: """ Get suite section with exact name from Testrail or create new one if not exist """ - test_rail_sections = [ - section for section in test_rail_suite["sections"] if section["name"] == section_name - ] + test_rail_sections = [section for section in test_rail_suite["sections"] if section["name"] == section_name] if not test_rail_sections: test_rail_section = self.api.sections.add_section( @@ -128,9 +116,7 @@ class TestrailExporter(TestExporter): elif len(test_rail_sections) == 1: return test_rail_sections.pop() else: - raise RuntimeError( - f"Too many results found in test rail for section name {section_name}" - ) + raise RuntimeError(f"Too many results found in test rail for section name {section_name}") def prepare_request_body(self, test_case: TestCase, test_suite, test_suite_section) -> dict: """ @@ -164,9 +150,7 @@ class TestrailExporter(TestExporter): self.api.cases.add_case(**request_body) - def update_test_case( - self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section - ) -> None: + def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None: """ Update test case in Testrail """ diff --git a/src/frostfs_testlib/fixtures.py b/src/frostfs_testlib/fixtures.py new file mode 100644 index 0000000..8f6873f --- /dev/null +++ b/src/frostfs_testlib/fixtures.py @@ -0,0 +1,35 @@ +import logging +import os +from 
importlib.metadata import entry_points + +import pytest +import yaml + +from frostfs_testlib import reporter +from frostfs_testlib.hosting.hosting import Hosting +from frostfs_testlib.resources.common import HOSTING_CONFIG_FILE +from frostfs_testlib.storage import get_service_registry + + +@pytest.fixture(scope="session") +def configure_testlib(): + reporter.get_reporter().register_handler(reporter.AllureHandler()) + reporter.get_reporter().register_handler(reporter.StepsLogger()) + logging.getLogger("paramiko").setLevel(logging.INFO) + + # Register Services for cluster + registry = get_service_registry() + services = entry_points(group="frostfs.testlib.services") + for svc in services: + registry.register_service(svc.name, svc.load()) + + +@pytest.fixture(scope="session") +def hosting(configure_testlib) -> Hosting: + with open(HOSTING_CONFIG_FILE, "r") as file: + hosting_config = yaml.full_load(file) + + hosting_instance = Hosting() + hosting_instance.configure(hosting_config) + + return hosting_instance diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index 7f8d2c4..03fdce9 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -46,3 +46,8 @@ with open(DEFAULT_WALLET_CONFIG, "w") as file: MAX_REQUEST_ATTEMPTS = 5 RETRY_MODE = "standard" CREDENTIALS_CREATE_TIMEOUT = "1m" + + +HOSTING_CONFIG_FILE = os.getenv( + "HOSTING_CONFIG_FILE", os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..", ".devenv.hosting.yaml")) +) From a983e0566e73284f70ad6862eceb7c9ef21e120c Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 26 Jul 2024 19:36:20 +0300 Subject: [PATCH 181/274] [#272] Add --generate-key flag to object operations Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/cli/frostfs_cli/container.py | 10 +++++++++- src/frostfs_testlib/cli/frostfs_cli/object.py | 16 ++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py index 43c3ec6..1ff217f 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/container.py +++ b/src/frostfs_testlib/cli/frostfs_cli/container.py @@ -100,6 +100,7 @@ class FrostfsCliContainer(CliCommand): cid: str, wallet: Optional[str] = None, address: Optional[str] = None, + generate_key: Optional[bool] = None, await_mode: bool = False, to: Optional[str] = None, json_mode: bool = False, @@ -121,6 +122,7 @@ class FrostfsCliContainer(CliCommand): wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). + generate_key: Generate a new private key. Returns: Command's result. @@ -136,6 +138,7 @@ class FrostfsCliContainer(CliCommand): cid: str, wallet: Optional[str] = None, address: Optional[str] = None, + generate_key: Optional[bool] = None, await_mode: bool = False, to: Optional[str] = None, session: Optional[str] = None, @@ -157,6 +160,7 @@ class FrostfsCliContainer(CliCommand): wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). + generate_key: Generate a new private key. Returns: Command's result. 
@@ -172,6 +176,7 @@ class FrostfsCliContainer(CliCommand): rpc_endpoint: str, wallet: Optional[str] = None, address: Optional[str] = None, + generate_key: Optional[bool] = None, owner: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -189,6 +194,7 @@ class FrostfsCliContainer(CliCommand): wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). + generate_key: Generate a new private key. Returns: Command's result. @@ -204,6 +210,7 @@ class FrostfsCliContainer(CliCommand): cid: str, wallet: Optional[str] = None, address: Optional[str] = None, + generate_key: Optional[bool] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, @@ -219,6 +226,7 @@ class FrostfsCliContainer(CliCommand): wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). + generate_key: Generate a new private key. Returns: Command's result. @@ -291,7 +299,7 @@ class FrostfsCliContainer(CliCommand): timeout: duration Timeout for the operation (default 15 s) short: shorten the output of node information. xhdr: Dict with request X-Headers. - generate_key: Generate a new private key + generate_key: Generate a new private key. Returns: diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 55c92be..070def0 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -13,6 +13,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -25,6 +26,7 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. + generate_key: Generate new private key. oid: Object ID. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). session: Filepath to a JSON- or binary-encoded token of the object DELETE session. @@ -49,6 +51,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, file: Optional[str] = None, header: Optional[str] = None, no_progress: bool = False, @@ -66,6 +69,7 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. file: File to write object payload to. Default: stdout. + generate_key: Generate new private key. header: File to write header to. Default: stdout. no_progress: Do not show progress bar. oid: Object ID. @@ -93,6 +97,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, range: Optional[str] = None, salt: Optional[str] = None, ttl: Optional[int] = None, @@ -108,6 +113,7 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. + generate_key: Generate new private key. oid: Object ID. range: Range to take hash from in the form offset1:length1,... rpc_endpoint: Remote node address (as 'multiaddr' or ':'). 
@@ -135,6 +141,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, file: Optional[str] = None, json_mode: bool = False, main_only: bool = False, @@ -153,6 +160,7 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. file: File to write object payload to. Default: stdout. + generate_key: Generate new private key. json_mode: Marshal output in JSON. main_only: Return only main fields. oid: Object ID. @@ -183,6 +191,7 @@ class FrostfsCliObject(CliCommand): expire_at: Optional[int] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -195,6 +204,7 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. + generate_key: Generate new private key. oid: Object ID. lifetime: Lock lifetime. expire_at: Lock expiration epoch. @@ -222,6 +232,7 @@ class FrostfsCliObject(CliCommand): address: Optional[str] = None, attributes: Optional[dict] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, copies_number: Optional[int] = None, disable_filename: bool = False, disable_timestamp: bool = False, @@ -246,6 +257,7 @@ class FrostfsCliObject(CliCommand): disable_timestamp: Do not set well-known timestamp attribute. expire_at: Last epoch in the life of the object. file: File with object payload. + generate_key: Generate new private key. no_progress: Do not show progress bar. notify: Object notification in the form of *epoch*:*topic*; '-' topic means using default. @@ -273,6 +285,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, file: Optional[str] = None, json_mode: bool = False, raw: bool = False, @@ -289,6 +302,7 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. file: File to write object payload to. Default: stdout. + generate_key: Generate new private key. json_mode: Marshal output in JSON. oid: Object ID. range: Range to take data from in the form offset:length. @@ -315,6 +329,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, filters: Optional[list] = None, oid: Optional[str] = None, phy: bool = False, @@ -332,6 +347,7 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. filters: Repeated filter expressions or files with protobuf JSON. + generate_key: Generate new private key. oid: Object ID. phy: Search physically stored objects. root: Search for user objects. 
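Usage note (editorial sketch, not part of the patch series): the new generate_key flag is threaded through the FrostfsCliContainer and FrostfsCliObject wrappers above and is rendered as the frostfs-cli --generate-key option. Below is a minimal illustration of how a test might call it; the endpoint and wallet-config values are placeholders borrowed from the devenv hosting config earlier in this series, and the FrostfsCli/LocalShell wiring is assumed to follow the testlib defaults.

    from frostfs_testlib.cli import FrostfsCli
    from frostfs_testlib.shell import LocalShell

    # Placeholder values: in a real test these come from cluster fixtures.
    RPC_ENDPOINT = "s01.frostfs.devenv:8080"
    WALLET_CONFIG = "./TemporaryDir/empty-password.yml"
    CID = "<container-id>"
    OID = "<object-id>"

    cli = FrostfsCli(LocalShell(), "frostfs-cli", config_file=WALLET_CONFIG)

    # generate_key=True maps to --generate-key: the request is signed with a
    # freshly generated ephemeral key instead of the wallet key, which is what
    # anonymous-access (OTHERS role) scenarios need.
    result = cli.object.get(
        rpc_endpoint=RPC_ENDPOINT,
        cid=CID,
        oid=OID,
        generate_key=True,
        timeout="30s",
    )
    print(result.stdout)

The same keyword is accepted by the container wrappers extended above (get, delete, list) and the remaining object operations touched by this patch.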
From 6b036a09b757782bbcde8ae087513075ca03e094 Mon Sep 17 00:00:00 2001 From: "s.makhov" Date: Wed, 31 Jul 2024 19:53:28 +0300 Subject: [PATCH 182/274] [#275] Add 'retry' and 'PRESET_CONTAINER_CREATION_RETRY_COUNT' variables to define max num of container creation retries --- src/frostfs_testlib/load/load_config.py | 2 ++ src/frostfs_testlib/resources/load_params.py | 1 + 2 files changed, 3 insertions(+) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 767e9f2..15103e0 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -190,6 +190,8 @@ class Preset(MetaConfig): containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None, False) # Container placement policy for containers for gRPC container_placement_policy: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "policy", None, False, formatter=force_list) + # Number of retries for creation of container + container_creation_retry: Optional[int] = metadata_field(grpc_preset_scenarios, "retry", None, False) # ------ S3 ------ # Amount of buckets which should be created diff --git a/src/frostfs_testlib/resources/load_params.py b/src/frostfs_testlib/resources/load_params.py index 97193cc..ad3ed1c 100644 --- a/src/frostfs_testlib/resources/load_params.py +++ b/src/frostfs_testlib/resources/load_params.py @@ -26,6 +26,7 @@ BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY = os.getenv( ) BACKGROUND_LOAD_S3_LOCATION = os.getenv("BACKGROUND_LOAD_S3_LOCATION", "node-off") PRESET_CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "40") +PRESET_CONTAINER_CREATION_RETRY_COUNT = os.getenv("CONTAINER_CREATION_RETRY_COUNT", "20") # TODO: At lease one object is required due to bug in xk6 (buckets with no objects produce millions exceptions in read) PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "1") K6_DIRECTORY = os.getenv("K6_DIRECTORY", "/etc/k6") From 8306a9f3ff4c231b145e6d2a2d264ce7cfe4973a Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 1 Aug 2024 16:32:41 +0300 Subject: [PATCH 183/274] [#276] Context manager for parallel func Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/testing/parallel.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/frostfs_testlib/testing/parallel.py b/src/frostfs_testlib/testing/parallel.py index 9c36118..0549e61 100644 --- a/src/frostfs_testlib/testing/parallel.py +++ b/src/frostfs_testlib/testing/parallel.py @@ -1,10 +1,22 @@ import itertools from concurrent.futures import Future, ThreadPoolExecutor +from contextlib import contextmanager from typing import Callable, Collection, Optional, Union MAX_WORKERS = 50 +@contextmanager +def parallel_workers_limit(workers_count: int): + global MAX_WORKERS + original_value = MAX_WORKERS + MAX_WORKERS = workers_count + try: + yield + finally: + MAX_WORKERS = original_value + + def parallel( fn: Union[Callable, list[Callable]], parallel_items: Optional[Collection] = None, From ea60c2104a0941a2f19549f6f412b97fb11e6002 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 5 Aug 2024 09:18:05 +0300 Subject: [PATCH 184/274] [#277] Minor change for shard Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/frostfs_cli/shards.py | 98 +++++++++++++++++++ .../storage/controllers/shards_watcher.py | 30 +++--- 2 files changed, 113 insertions(+), 15 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py index 4399b13..e88707a 100644 ---
a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -143,3 +143,101 @@ class FrostfsCliShards(CliCommand): **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, ) + def evacuation_start( + self, + endpoint: str, + id: Optional[str] = None, + scope: Optional[str] = None, + all: bool = False, + no_errors: bool = True, + await_mode: bool = False, + address: Optional[str] = None, + timeout: Optional[str] = None, + no_progress: bool = False, + ) -> CommandResult: + """ + Objects evacuation from shard to other shards. + + Args: + address: Address of wallet account + all: Process all shards + await: Block execution until evacuation is completed + endpoint: Remote node control address (as 'multiaddr' or ':') + id: List of shard IDs in base58 encoding + no_errors: Skip invalid/unreadable objects (default true) + no_progress: Print progress if await provided + scope: Evacuation scope; possible values: trees, objects, all (default "all") + timeout: Timeout for an operation (default 15s) + + Returns: + Command's result. + """ + return self._execute( + "control shards evacuation start", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def evacuation_reset( + self, + endpoint: str, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """ + Reset evacuate objects from shard to other shards status. + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + timeout: Timeout for an operation (default 15s) + Returns: + Command's result. + """ + return self._execute( + "control shards evacuation reset", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def evacuation_stop( + self, + endpoint: str, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """ + Stop running evacuate process from shard to other shards. + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + timeout: Timeout for an operation (default 15s) + + Returns: + Command's result. + """ + return self._execute( + "control shards evacuation stop", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def evacuation_status( + self, + endpoint: str, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """ + Get evacuate objects from shard to other shards status. + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + timeout: Timeout for an operation (default 15s) + + Returns: + Command's result. 
+ """ + return self._execute( + "control shards evacuation status", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/storage/controllers/shards_watcher.py b/src/frostfs_testlib/storage/controllers/shards_watcher.py index 3d313f1..5017406 100644 --- a/src/frostfs_testlib/storage/controllers/shards_watcher.py +++ b/src/frostfs_testlib/storage/controllers/shards_watcher.py @@ -2,22 +2,22 @@ import json from typing import Any from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards +from frostfs_testlib.shell.interfaces import CommandResult from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.testing.test_control import wait_for_success class ShardsWatcher: - shards_snapshots: list[dict[str, Any]] = [] - def __init__(self, node_under_test: ClusterNode) -> None: + self.shards_snapshots: list[dict[str, Any]] = [] self.storage_node = node_under_test.storage_node self.take_shards_snapshot() - def take_shards_snapshot(self): + def take_shards_snapshot(self) -> None: snapshot = self.get_shards_snapshot() self.shards_snapshots.append(snapshot) - def get_shards_snapshot(self): + def get_shards_snapshot(self) -> dict[str, Any]: shards_snapshot: dict[str, Any] = {} shards = self.get_shards() @@ -26,17 +26,17 @@ class ShardsWatcher: return shards_snapshot - def _get_current_snapshot(self): + def _get_current_snapshot(self) -> dict[str, Any]: return self.shards_snapshots[-1] - def _get_previous_snapshot(self): + def _get_previous_snapshot(self) -> dict[str, Any]: return self.shards_snapshots[-2] - def _is_shard_present(self, shard_id): + def _is_shard_present(self, shard_id) -> bool: snapshot = self._get_current_snapshot() return shard_id in snapshot - def get_shards_with_new_errors(self): + def get_shards_with_new_errors(self) -> dict[str, Any]: current_snapshot = self._get_current_snapshot() previous_snapshot = self._get_previous_snapshot() shards_with_new_errors: dict[str, Any] = {} @@ -46,7 +46,7 @@ class ShardsWatcher: return shards_with_new_errors - def get_shards_with_errors(self): + def get_shards_with_errors(self) -> dict[str, Any]: snapshot = self.get_shards_snapshot() shards_with_errors: dict[str, Any] = {} for shard_id, shard in snapshot.items(): @@ -55,7 +55,7 @@ class ShardsWatcher: return shards_with_errors - def get_shard_status(self, shard_id: str): + def get_shard_status(self, shard_id: str): # -> Any: snapshot = self.get_shards_snapshot() assert shard_id in snapshot, f"Shard {shard_id} is missing: {snapshot}" @@ -63,18 +63,18 @@ class ShardsWatcher: return snapshot[shard_id]["mode"] @wait_for_success(60, 2) - def await_for_all_shards_status(self, status: str): + def await_for_all_shards_status(self, status: str) -> None: snapshot = self.get_shards_snapshot() for shard_id in snapshot: assert snapshot[shard_id]["mode"] == status, f"Shard {shard_id} have wrong shard status" @wait_for_success(60, 2) - def await_for_shard_status(self, shard_id: str, status: str): + def await_for_shard_status(self, shard_id: str, status: str) -> None: assert self.get_shard_status(shard_id) == status @wait_for_success(60, 2) - def await_for_shard_have_new_errors(self, shard_id: str): + def await_for_shard_have_new_errors(self, shard_id: str) -> None: self.take_shards_snapshot() assert self._is_shard_present(shard_id) shards_with_new_errors = self.get_shards_with_new_errors() @@ -82,7 +82,7 @@ class ShardsWatcher: assert shard_id in shards_with_new_errors, f"Expected shard {shard_id} to have new errors, but 
haven't {self.shards_snapshots[-1]}" @wait_for_success(300, 5) - def await_for_shards_have_no_new_errors(self): + def await_for_shards_have_no_new_errors(self) -> None: self.take_shards_snapshot() shards_with_new_errors = self.get_shards_with_new_errors() assert len(shards_with_new_errors) == 0 @@ -102,7 +102,7 @@ class ShardsWatcher: return json.loads(response.stdout.split(">", 1)[1]) - def set_shard_mode(self, shard_id: str, mode: str, clear_errors: bool = True): + def set_shard_mode(self, shard_id: str, mode: str, clear_errors: bool = True) -> CommandResult: shards_cli = FrostfsCliShards( self.storage_node.host.get_shell(), self.storage_node.host.get_cli_config("frostfs-cli").exec_path, From 54b42e2d8d1c3815d1379918f54d432381a685b2 Mon Sep 17 00:00:00 2001 From: Roman Chernykh Date: Wed, 31 Jul 2024 18:32:08 +0500 Subject: [PATCH 185/274] [#274] Fix iam_attach_group_policy function --- src/frostfs_testlib/s3/aws_cli_client.py | 2 +- src/frostfs_testlib/s3/interfaces.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index ae9254c..8169afe 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -975,7 +975,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) assert response.get("Policy"), f"Expected Policy in response:\n{response}" - assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" + assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" return response diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index f3793e0..b1825d5 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -408,7 +408,7 @@ class S3ClientWrapper(HumanReadableABC): """Adds the specified user to the specified group""" @abstractmethod - def iam_attach_group_policy(self, group: str, policy_arn: str) -> dict: + def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: """Attaches the specified managed policy to the specified IAM group""" @abstractmethod From ae9e8d8c30217485d5442525d32c432aa3f183ce Mon Sep 17 00:00:00 2001 From: Roman Chernykh Date: Fri, 2 Aug 2024 17:38:56 +0500 Subject: [PATCH 186/274] [#274] Fix iam_get_policy function --- src/frostfs_testlib/s3/boto3_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 5686b78..a644a6f 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -776,7 +776,7 @@ class Boto3ClientWrapper(S3ClientWrapper): def iam_get_policy(self, policy_arn: str) -> dict: response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn) assert response.get("Policy"), f"Expected Policy in response:\n{response}" - assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" + assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" return response From 5bdacdf5ba30d4e60fef82a1c113a1f467c3c99a Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Thu, 25 Jul 2024 15:32:07 +0300 Subject: [PATCH 187/274] [#269] Fix get contracts method --- .../storage/controllers/cluster_state_controller.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index cec5ed3..5d87a60 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -538,4 +538,4 @@ class ClusterStateController: shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC, ) - return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_endpoint(), domain_name).stdout + return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_http_endpoint(), domain_name).stdout From 8a8b35846e9bca105233a9cad30d39d8e98dd312 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 7 Aug 2024 17:35:02 +0300 Subject: [PATCH 188/274] [#278] Small QoL updates Signed-off-by: a.berezin --- src/frostfs_testlib/resources/common.py | 2 ++ src/frostfs_testlib/shell/local_shell.py | 5 ++++- src/frostfs_testlib/steps/cli/object.py | 7 +------ src/frostfs_testlib/storage/dataclasses/ape.py | 5 +++++ 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index 03fdce9..1c93b12 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -51,3 +51,5 @@ CREDENTIALS_CREATE_TIMEOUT = "1m" HOSTING_CONFIG_FILE = os.getenv( "HOSTING_CONFIG_FILE", os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..", ".devenv.hosting.yaml")) ) + +MORE_LOG = os.getenv("MORE_LOG", "1") diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index 2fb6631..746070f 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -1,15 +1,18 @@ import logging import subprocess import tempfile +from contextlib import nullcontext from datetime import datetime from typing import IO, Optional import pexpect from frostfs_testlib import reporter +from frostfs_testlib.resources.common import MORE_LOG from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell logger = logging.getLogger("frostfs.testlib.shell") +step_context = reporter.step if MORE_LOG == "1" else nullcontext class LocalShell(Shell): @@ -28,7 +31,7 @@ class LocalShell(Shell): for inspector in [*self.command_inspectors, *extra_inspectors]: command = inspector.inspect(original_command, command) - with reporter.step(f"Executing command: {command}"): + with step_context(f"Executing command: {command}"): if options.interactive_inputs: return self._exec_interactive(command, options) return self._exec_non_interactive(command, options) diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 7de7a71..72debc2 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -15,7 +15,7 @@ from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import wait_for_success from frostfs_testlib.utils import json_utils -from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output +from frostfs_testlib.utils.cli_utils import parse_netmap_output from frostfs_testlib.utils.file_utils import TestFile logger = logging.getLogger("NeoLogger") @@ -623,25 +623,20 @@ def head_object( # If response is Complex Object header, it has `splitId` key if "splitId" in 
decoded.keys(): - logger.info("decoding split header") return json_utils.decode_split_header(decoded) # If response is Last or Linking Object header, # it has `header` dictionary and non-null `split` dictionary if "split" in decoded["header"].keys(): if decoded["header"]["split"]: - logger.info("decoding linking object") return json_utils.decode_linking_object(decoded) if decoded["header"]["objectType"] == "STORAGE_GROUP": - logger.info("decoding storage group") return json_utils.decode_storage_group(decoded) if decoded["header"]["objectType"] == "TOMBSTONE": - logger.info("decoding tombstone") return json_utils.decode_tombstone(decoded) - logger.info("decoding simple header") return json_utils.decode_simple_header(decoded) diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py index 84b3033..de1648e 100644 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -47,6 +47,7 @@ class ConditionType(HumanReadableEnum): class ConditionKey(HumanReadableEnum): ROLE = '"\\$Actor:role"' PUBLIC_KEY = '"\\$Actor:publicKey"' + OBJECT_TYPE = '"\\$Object:objectType"' class MatchType(HumanReadableEnum): @@ -75,6 +76,10 @@ class Condition: def by_key(*args, **kwargs) -> "Condition": return Condition(ConditionKey.PUBLIC_KEY, *args, **kwargs) + @staticmethod + def by_object_type(*args, **kwargs) -> "Condition": + return Condition(ConditionKey.OBJECT_TYPE, *args, **kwargs) + class Rule: def __init__( From 0ba4a73db336b47f8023036999aab4b24096458d Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Thu, 8 Aug 2024 18:34:46 +0300 Subject: [PATCH 189/274] [#279] Add objectID filter for APE Signed-off-by: a.berezin --- src/frostfs_testlib/storage/dataclasses/ape.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py index de1648e..b6563f4 100644 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -48,6 +48,7 @@ class ConditionKey(HumanReadableEnum): ROLE = '"\\$Actor:role"' PUBLIC_KEY = '"\\$Actor:publicKey"' OBJECT_TYPE = '"\\$Object:objectType"' + OBJECT_ID = '"\\$Object:objectID"' class MatchType(HumanReadableEnum): @@ -80,6 +81,10 @@ class Condition: def by_object_type(*args, **kwargs) -> "Condition": return Condition(ConditionKey.OBJECT_TYPE, *args, **kwargs) + @staticmethod + def by_object_id(*args, **kwargs) -> "Condition": + return Condition(ConditionKey.OBJECT_ID, *args, **kwargs) + class Rule: def __init__( From 1c2ed2592912834073d3ab9c7d139abc2b04e346 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 13 Aug 2024 10:09:28 +0300 Subject: [PATCH 190/274] [#280] Fix neo-go query height in steps Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/cli/object.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 72debc2..f28de06 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -690,11 +690,13 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict: latest_block = first_line.split(":") # taking second line from command's output contain wallet key second_line = output.split("\n")[1] - validated_state = second_line.split(":") - return { - latest_block[0].replace(":", ""): int(latest_block[1]), - validated_state[0].replace(":", ""): int(validated_state[1]), - } + if 
second_line != "": + validated_state = second_line.split(":") + return { + latest_block[0].replace(":", ""): int(latest_block[1]), + validated_state[0].replace(":", ""): int(validated_state[1]), + } + return {latest_block[0].replace(":", ""): int(latest_block[1])} @wait_for_success() From 6926c09dbe4574f5b54b140163b08a1fed24376b Mon Sep 17 00:00:00 2001 From: "m.malygina" Date: Wed, 7 Aug 2024 16:19:00 +0300 Subject: [PATCH 191/274] [#281] add hostname to HostConfig Signed-off-by: m.malygina --- .devenv.hosting.yaml | 1 + src/frostfs_testlib/hosting/config.py | 1 + 2 files changed, 2 insertions(+) diff --git a/.devenv.hosting.yaml b/.devenv.hosting.yaml index d096625..f3b8c51 100644 --- a/.devenv.hosting.yaml +++ b/.devenv.hosting.yaml @@ -1,5 +1,6 @@ hosts: - address: localhost + hostname: localhost attributes: sudo_shell: false plugin_name: docker diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py index f52f8b7..6cdee39 100644 --- a/src/frostfs_testlib/hosting/config.py +++ b/src/frostfs_testlib/hosting/config.py @@ -60,6 +60,7 @@ class HostConfig: """ plugin_name: str + hostname: str healthcheck_plugin_name: str address: str s3_creds_plugin_name: str = field(default="authmate") From 8ae1b99db9777944d59507b36d2e76acd52942ba Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Fri, 16 Aug 2024 10:22:21 +0300 Subject: [PATCH 192/274] [#282] New grpc realization for object operations Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/storage/constants.py | 7 + .../dataclasses/storage_object_info.py | 13 + .../storage/grpc_operations/__init__.py | 0 .../grpc_operations/client_wrappers.py | 14 + .../implementations/__init__.py | 0 .../grpc_operations/implementations/chunks.py | 124 ++++ .../implementations/container.py | 112 ++++ .../grpc_operations/implementations/object.py | 616 ++++++++++++++++++ .../storage/grpc_operations/interfaces.py | 285 ++++++++ 9 files changed, 1171 insertions(+) create mode 100644 src/frostfs_testlib/storage/grpc_operations/__init__.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/client_wrappers.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/implementations/container.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/implementations/object.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces.py diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 66bf5cc..84f8d24 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -16,3 +16,10 @@ class ConfigAttributes: ENDPOINT_PROMETHEUS = "endpoint_prometheus" CONTROL_ENDPOINT = "control_endpoint" UN_LOCODE = "un_locode" + + +class PlacementRule: + DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" + SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" + REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" + DEFAULT_EC_PLACEMENT_RULE = "EC 3.1" diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index 1ecb300..d192de5 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -77,3 +77,16 @@ class NodeNetInfo: 
maintenance_mode_allowed: str = None eigen_trust_alpha: str = None eigen_trust_iterations: str = None + + +@dataclass +class Chunk: + def __init__(self, object_id: str, required_nodes: list, confirmed_nodes: list, ec_parent_object_id: str, ec_index: int) -> None: + self.object_id = object_id + self.required_nodes = required_nodes + self.confirmed_nodes = confirmed_nodes + self.ec_parent_object_id = ec_parent_object_id + self.ec_index = ec_index + + def __str__(self) -> str: + return self.object_id diff --git a/src/frostfs_testlib/storage/grpc_operations/__init__.py b/src/frostfs_testlib/storage/grpc_operations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py new file mode 100644 index 0000000..8cef23b --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py @@ -0,0 +1,14 @@ +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.storage.grpc_operations import interfaces +from frostfs_testlib.storage.grpc_operations.implementations import container, object + + +class CliClientWrapper(interfaces.GrpcClientWrapper): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + self.object: interfaces.ObjectInterface = object.ObjectOperations(self.cli) + self.container: interfaces.ContainerInterface = container.ContainerOperations(self.cli) + + +class RpcClientWrapper(interfaces.GrpcClientWrapper): + pass # The next series diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py new file mode 100644 index 0000000..70d0823 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -0,0 +1,124 @@ +import json +from typing import Optional + +from frostfs_testlib import reporter +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher +from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo +from frostfs_testlib.storage.grpc_operations import interfaces +from frostfs_testlib.utils.cli_utils import parse_netmap_output + + +class ChunksOperations(interfaces.ChunksInterface): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + + @reporter.step("Search node without chunks") + def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: + if not endpoint: + endpoint = cluster.default_rpc_endpoint + netmap = parse_netmap_output(self.cli.netmap.snapshot(endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout) + chunks_node_key = [] + for chunk in chunks: + chunks_node_key.extend(chunk.confirmed_nodes) + for node_info in netmap.copy(): + if node_info.node_id in chunks_node_key and node_info in netmap: + netmap.remove(node_info) + result = [] + for node_info in netmap: + for cluster_node in cluster.cluster_nodes: + if node_info.node == cluster_node.host_ip: + result.append(cluster_node) + return result + + @reporter.step("Search node with chunk {chunk}") + def
get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: + netmap = parse_netmap_output(self.cli.netmap.snapshot(cluster.default_rpc_endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout) + for node_info in netmap: + if node_info.node_id in chunk.confirmed_nodes: + for cluster_node in cluster.cluster_nodes: + if cluster_node.host_ip == node_info.node: + return (cluster_node, node_info) + + @reporter.step("Search shard with chunk {chunk}") + def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: + oid_path = f"{chunk.object_id[0]}/{chunk.object_id[1]}/{chunk.object_id[2]}/{chunk.object_id[3]}" + node_shell = node.storage_node.host.get_shell() + shards_watcher = ShardsWatcher(node) + + with reporter.step("Search object file"): + for shard_id, shard_info in shards_watcher.shards_snapshots[-1].items(): + check_dir = node_shell.exec(f" [ -d {shard_info['blobstor'][1]['path']}/{oid_path} ] && echo 1 || echo 0").stdout + if "1" in check_dir.strip(): + return shard_id + + @reporter.step("Get all chunks") + def get_all( + self, + rpc_endpoint: str, + cid: str, + oid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> list[Chunk]: + object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]}) + return self._parse_object_nodes(object_nodes.stdout) + + @reporter.step("Get last parity chunk") + def get_parity( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]}) + return self._parse_object_nodes(object_nodes.stdout)[-1] + + @reporter.step("Get first data chunk") + def get_first_data( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]}) + return self._parse_object_nodes(object_nodes.stdout)[0] + + def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]: + parse_result = json.loads(object_nodes) + if parse_result.get("errors"): + raise RuntimeError(parse_result["errors"]) + return [Chunk(**chunk) for chunk in parse_result["data_objects"]] diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py new file mode 100644 index 0000000..077bdfd --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py @@ -0,0 +1,112 @@ +import logging +from typing import
Optional + +from frostfs_testlib import reporter +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.storage.constants import PlacementRule +from frostfs_testlib.storage.grpc_operations import interfaces + +logger = logging.getLogger("NeoLogger") + + +class ContainerOperations(interfaces.ContainerInterface): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + + @reporter.step("Create Container") + def create( + self, + endpoint: str, + rule: str = PlacementRule.DEFAULT_PLACEMENT_RULE, + basic_acl: str = "", + attributes: Optional[dict] = None, + session_token: str = "", + name: Optional[str] = None, + options: Optional[dict] = None, + await_mode: bool = True, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + A wrapper for `frostfs-cli container create` call. + + Args: + wallet (WalletInfo): a wallet on whose behalf a container is created + rule (optional, str): placement rule for container + basic_acl (optional, str): an ACL for container, will be + appended to `--basic-acl` key + attributes (optional, dict): container attributes, will be + appended to `--attributes` key + session_token (optional, str): a path to session token file + session_wallet(optional, str): a path to the wallet which signed + the session token; this parameter makes sense + when paired with `session_token` + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + options (optional, dict): any other options to pass to the call + name (optional, str): container name attribute + await_mode (bool): block execution until container is persisted + wait_for_creation (): Wait for container shows in container list + timeout: Timeout for the operation. + + Returns: + (str): CID of the created container + """ + result = self.cli.container.create( + rpc_endpoint=endpoint, + policy=rule, + basic_acl=basic_acl, + attributes=attributes, + name=name, + session=session_token, + await_mode=await_mode, + timeout=timeout, + **options or {}, + ) + + cid = self._parse_cid(result.stdout) + + logger.info("Container created; waiting until it is persisted in the sidechain") + + return cid + + @reporter.step("List Containers") + def list(self, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT) -> list[str]: + """ + A wrapper for `frostfs-cli container list` call. It returns all the + available containers for the given wallet. + Args: + wallet (WalletInfo): a wallet on whose behalf we list the containers + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + timeout: Timeout for the operation. + Returns: + (list): list of containers + """ + result = self.cli.container.list(rpc_endpoint=endpoint, timeout=timeout) + return result.stdout.split() + + def _parse_cid(self, output: str) -> str: + """ + Parses container ID from a given CLI output. The input string we expect: + container ID: 2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN + awaiting... + container has been persisted on sidechain + We want to take 'container ID' value from the string.
+ + Args: + output (str): CLI output to parse + + Returns: + (str): extracted CID + """ + try: + # taking first line from command's output + first_line = output.split("\n")[0] + except Exception: + first_line = "" + logger.error(f"Got empty output: {output}") + splitted = first_line.split(": ") + if len(splitted) != 2: + raise ValueError(f"no CID was parsed from command output: \t{first_line}") + return splitted[1] diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py new file mode 100644 index 0000000..a967853 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py @@ -0,0 +1,616 @@ +import json +import logging +import os +import re +import uuid +from typing import Any, Optional + +from frostfs_testlib import reporter, utils +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.resources.common import ASSETS_DIR +from frostfs_testlib.shell.interfaces import CommandResult +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.grpc_operations import interfaces +from frostfs_testlib.storage.grpc_operations.implementations.chunks import ChunksOperations +from frostfs_testlib.testing.test_control import wait_for_success +from frostfs_testlib.utils import cli_utils, file_utils + +logger = logging.getLogger("NeoLogger") + + +class ObjectOperations(interfaces.ObjectInterface): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + self.chunks: interfaces.ChunksInterface = ChunksOperations(self.cli) + + @reporter.step("Delete object") + def delete( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + DELETE an Object. + + Args: + cid: ID of Container where we get the Object from + oid: ID of Object we are going to delete + bearer: path to Bearer Token file, appends to `--bearer` key + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + (str): Tombstone ID + """ + result = self.cli.object.delete( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + id_str = result.stdout.split("\n")[1] + tombstone = id_str.split(":")[1] + return tombstone.strip() + + @reporter.step("Get object") + def get( + self, + cid: str, + oid: str, + endpoint: str, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> file_utils.TestFile: + """ + GET from FrostFS. + + Args: + cid (str): ID of Container where we get the Object from + oid (str): Object ID + bearer: path to Bearer Token file, appends to `--bearer` key + write_object: path to downloaded file, appends to `--file` key + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + no_progress(optional, bool): do not show progress bar + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + timeout: Timeout for the operation.
+ Returns: + (str): path to downloaded file + """ + if not write_object: + write_object = str(uuid.uuid4()) + test_file = file_utils.TestFile(os.path.join(ASSETS_DIR, write_object)) + + self.cli.object.get( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + file=test_file, + bearer=bearer, + no_progress=no_progress, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + return test_file + + @reporter.step("Get object from random node") + def get_from_random_node( + self, + cid: str, + oid: str, + cluster: Cluster, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + GET from FrostFS random storage node + + Args: + cid: ID of Container where we get the Object from + oid: Object ID + cluster: cluster object + bearer (optional, str): path to Bearer Token file, appends to `--bearer` key + write_object (optional, str): path to downloaded file, appends to `--file` key + no_progress(optional, bool): do not show progress bar + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + (str): path to downloaded file + """ + endpoint = cluster.get_random_storage_rpc_endpoint() + return self.get( + cid, + oid, + endpoint, + bearer, + write_object, + xhdr, + no_progress, + session, + timeout, + ) + + @reporter.step("Get hash object") + def hash( + self, + rpc_endpoint: str, + cid: str, + oid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + range: Optional[str] = None, + salt: Optional[str] = None, + ttl: Optional[int] = None, + session: Optional[str] = None, + hash_type: Optional[str] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + Get object hash. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + generate_key: Generate new private key. + oid: Object ID. + range: Range to take hash from in the form offset1:length1,... + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + salt: Salt in hex format. + ttl: TTL value in request meta header (default 2). + session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session. + hash_type: Hash type. Either 'sha256' or 'tz' (default "sha256"). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). + + Returns: + Command's result. + """ + result = self.cli.object.hash( + rpc_endpoint=rpc_endpoint, + cid=cid, + oid=oid, + address=address, + bearer=bearer, + generate_key=generate_key, + range=range, + salt=salt, + ttl=ttl, + xhdr=xhdr, + session=session, + hash_type=hash_type, + timeout=timeout, + ) + return result.stdout + + @reporter.step("Head object") + def head( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + json_output: bool = True, + is_raw: bool = False, + is_direct: bool = False, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> CommandResult | Any: + """ + HEAD an Object. 
+ + Args: + cid (str): ID of Container where we get the Object from + oid (str): ObjectID to HEAD + bearer (optional, str): path to Bearer Token file, appends to `--bearer` key + endpoint(optional, str): FrostFS endpoint to send request to + json_output(optional, bool): return response in JSON format or not; this flag + turns into `--json` key + is_raw(optional, bool): send "raw" request or not; this flag + turns into `--raw` key + is_direct(optional, bool): send request directly to the node or not; this flag + turns into `--ttl 1` key + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + depending on the `json_output` parameter value, the function returns + (dict): HEAD response in JSON format + or + (str): HEAD response as a plain text + """ + result = self.cli.object.head( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + json_mode=json_output, + raw=is_raw, + ttl=1 if is_direct else None, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + if not json_output: + return result + + try: + decoded = json.loads(result.stdout) + except Exception as exc: + # If we failed to parse output as JSON, the cause might be + # the plain text string in the beginning of the output. + # Here we cut off first string and try to parse again. + logger.info(f"failed to parse output: {exc}") + logger.info("parsing output in another way") + fst_line_idx = result.stdout.find("\n") + decoded = json.loads(result.stdout[fst_line_idx:]) + + # if response + if "chunks" in decoded.keys(): + logger.info("decoding ec chunks") + return decoded["chunks"] + + # If response is Complex Object header, it has `splitId` key + if "splitId" in decoded.keys(): + logger.info("decoding split header") + return utils.json_utils.decode_split_header(decoded) + + # If response is Last or Linking Object header, + # it has `header` dictionary and non-null `split` dictionary + if "split" in decoded["header"].keys(): + if decoded["header"]["split"]: + logger.info("decoding linking object") + return utils.json_utils.decode_linking_object(decoded) + + if decoded["header"]["objectType"] == "STORAGE_GROUP": + logger.info("decoding storage group") + return utils.json_utils.decode_storage_group(decoded) + + if decoded["header"]["objectType"] == "TOMBSTONE": + logger.info("decoding tombstone") + return utils.json_utils.decode_tombstone(decoded) + + logger.info("decoding simple header") + return utils.json_utils.decode_simple_header(decoded) + + @reporter.step("Lock Object") + def lock( + self, + cid: str, + oid: str, + endpoint: str, + lifetime: Optional[int] = None, + expire_at: Optional[int] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + Locks object in container. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + oid: Object ID. + lifetime: Lock lifetime. + expire_at: Lock expiration epoch. + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + session: Path to a JSON-encoded container session token. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Dict with request X-Headers. 
+            timeout: Timeout for the operation.
+
+        Returns:
+            Lock object ID
+        """
+        result = self.cli.object.lock(
+            rpc_endpoint=endpoint,
+            lifetime=lifetime,
+            expire_at=expire_at,
+            address=address,
+            cid=cid,
+            oid=oid,
+            bearer=bearer,
+            xhdr=xhdr,
+            session=session,
+            ttl=ttl,
+            timeout=timeout,
+        )
+
+        # Splitting CLI output to separate lines and taking the first line
+        id_str = result.stdout.strip().split("\n")[0]
+        oid = id_str.split(":")[1]
+        return oid.strip()
+
+    @reporter.step("Put object")
+    def put(
+        self,
+        path: str,
+        cid: str,
+        endpoint: str,
+        bearer: Optional[str] = None,
+        copies_number: Optional[int] = None,
+        attributes: Optional[dict] = None,
+        xhdr: Optional[dict] = None,
+        expire_at: Optional[int] = None,
+        no_progress: bool = True,
+        session: Optional[str] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> str:
+        """
+        PUT of given file.
+
+        Args:
+            path: path to file to be PUT
+            cid: ID of Container where we get the Object from
+            bearer: path to Bearer Token file, appends to `--bearer` key
+            copies_number: Number of copies of the object to store within the RPC call
+            attributes: User attributes in form of Key1=Value1,Key2=Value2
+            endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
+            no_progress: do not show progress bar
+            expire_at: Last epoch in the life of the object
+            xhdr: Request X-Headers in form of Key=Value
+            session: path to a JSON-encoded container session token
+            timeout: Timeout for the operation.
+        Returns:
+            (str): ID of uploaded Object
+        """
+        result = self.cli.object.put(
+            rpc_endpoint=endpoint,
+            file=path,
+            cid=cid,
+            attributes=attributes,
+            bearer=bearer,
+            copies_number=copies_number,
+            expire_at=expire_at,
+            no_progress=no_progress,
+            xhdr=xhdr,
+            session=session,
+            timeout=timeout,
+        )
+
+        # Splitting CLI output to separate lines and taking the penultimate line
+        id_str = result.stdout.strip().split("\n")[-2]
+        oid = id_str.split(":")[1]
+        return oid.strip()
+
+    @reporter.step("Put object to random node")
+    def put_to_random_node(
+        self,
+        path: str,
+        cid: str,
+        cluster: Cluster,
+        bearer: Optional[str] = None,
+        copies_number: Optional[int] = None,
+        attributes: Optional[dict] = None,
+        xhdr: Optional[dict] = None,
+        expire_at: Optional[int] = None,
+        no_progress: bool = True,
+        session: Optional[str] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> str:
+        """
+        PUT of given file to a random storage node.
+
+        Args:
+            path: path to file to be PUT
+            cid: ID of Container where we get the Object from
+            cluster: cluster under test
+            bearer: path to Bearer Token file, appends to `--bearer` key
+            copies_number: Number of copies of the object to store within the RPC call
+            attributes: User attributes in form of Key1=Value1,Key2=Value2
+            no_progress: do not show progress bar
+            expire_at: Last epoch in the life of the object
+            xhdr: Request X-Headers in form of Key=Value
+            session: path to a JSON-encoded container session token
+            timeout: Timeout for the operation.
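+
+        Example:
+            An illustrative call sketch (`client` stands for any GrpcClientWrapper
+            implementation; the file path is an example, not a fixed value):
+                oid = client.object.put_to_random_node("/tmp/data.bin", cid, cluster)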
+        Returns:
+            ID of uploaded Object
+        """
+        endpoint = cluster.get_random_storage_rpc_endpoint()
+        return self.put(
+            path,
+            cid,
+            endpoint,
+            bearer,
+            copies_number,
+            attributes,
+            xhdr,
+            expire_at,
+            no_progress,
+            session,
+            timeout=timeout,
+        )
+
+    @reporter.step("Get Range")
+    def range(
+        self,
+        cid: str,
+        oid: str,
+        range_cut: str,
+        endpoint: str,
+        bearer: str = "",
+        xhdr: Optional[dict] = None,
+        session: Optional[str] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> tuple[file_utils.TestFile, bytes]:
+        """
+        GETRANGE an Object.
+
+        Args:
+            cid: ID of Container where we get the Object from
+            oid: ID of Object we are going to request
+            range_cut: range to take data from in the form offset:length
+            endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
+            bearer: path to Bearer Token file, appends to `--bearer` key
+            xhdr: Request X-Headers in form of Key=Value
+            session: path to a JSON-encoded container session token
+            timeout: Timeout for the operation.
+        Returns:
+            (TestFile, bytes) - path to the file with range content and content of this file as bytes
+        """
+        test_file = file_utils.TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4())))
+
+        self.cli.object.range(
+            rpc_endpoint=endpoint,
+            cid=cid,
+            oid=oid,
+            range=range_cut,
+            file=test_file,
+            bearer=bearer,
+            xhdr=xhdr,
+            session=session,
+            timeout=timeout,
+        )
+
+        with open(test_file, "rb") as file:
+            content = file.read()
+        return test_file, content
+
+    @reporter.step("Search object")
+    def search(
+        self,
+        cid: str,
+        endpoint: str,
+        bearer: str = "",
+        filters: Optional[dict] = None,
+        expected_objects_list: Optional[list] = None,
+        xhdr: Optional[dict] = None,
+        session: Optional[str] = None,
+        phy: bool = False,
+        root: bool = False,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> list:
+        """
+        SEARCH an Object.
+
+        Args:
+            cid: ID of Container where we get the Object from
+            bearer: path to Bearer Token file, appends to `--bearer` key
+            endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
+            filters: key=value pairs to filter Objects
+            expected_objects_list: a list of ObjectIDs to compare found Objects with
+            xhdr: Request X-Headers in form of Key=Value
+            session: path to a JSON-encoded container session token
+            phy: Search physically stored objects.
+            root: Search for user objects.
+            timeout: Timeout for the operation.
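+
+        Example:
+            An illustrative call (`client` is any GrpcClientWrapper implementation;
+            the attribute name and value are examples). Filters are rendered as
+            `key EQ value` expressions for the CLI:
+                oids = client.object.search(cid, endpoint, filters={"FileName": "cat.jpg"})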
+
+        Returns:
+            list of found ObjectIDs
+        """
+        result = self.cli.object.search(
+            rpc_endpoint=endpoint,
+            cid=cid,
+            bearer=bearer,
+            xhdr=xhdr,
+            filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None,
+            session=session,
+            phy=phy,
+            root=root,
+            timeout=timeout,
+        )
+
+        found_objects = re.findall(r"(\w{43,44})", result.stdout)
+
+        if expected_objects_list:
+            if sorted(found_objects) == sorted(expected_objects_list):
+                logger.info(f"Found objects list '{found_objects}' " f"is equal to expected list '{expected_objects_list}'")
+            else:
+                logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'")
+
+        return found_objects
+
+    @wait_for_success()
+    @reporter.step("Search object nodes")
+    def nodes(
+        self,
+        cluster: Cluster,
+        cid: str,
+        oid: str,
+        alive_node: ClusterNode,
+        bearer: str = "",
+        xhdr: Optional[dict] = None,
+        is_direct: bool = False,
+        verify_presence_all: bool = False,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> list[ClusterNode]:
+        endpoint = alive_node.storage_node.get_rpc_endpoint()
+
+        response = self.cli.object.nodes(
+            rpc_endpoint=endpoint,
+            cid=cid,
+            oid=oid,
+            bearer=bearer,
+            ttl=1 if is_direct else None,
+            json=True,
+            xhdr=xhdr,
+            timeout=timeout,
+            verify_presence_all=verify_presence_all,
+        )
+
+        response_json = json.loads(response.stdout)
+        # Currently, the command will show expected and confirmed nodes.
+        # And we (currently) count only nodes which are both expected and confirmed
+        object_nodes_id = {
+            required_node
+            for data_object in response_json["data_objects"]
+            for required_node in data_object["required_nodes"]
+            if required_node in data_object["confirmed_nodes"]
+        }
+
+        netmap_nodes_list = cli_utils.parse_netmap_output(
+            self.cli.netmap.snapshot(
+                rpc_endpoint=endpoint,
+            ).stdout
+        )
+        netmap_nodes = [
+            netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id
+        ]
+
+        object_nodes = [
+            cluster_node
+            for netmap_node in netmap_nodes
+            for cluster_node in cluster.cluster_nodes
+            if netmap_node.node == cluster_node.host_ip
+        ]
+
+        return object_nodes
diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces.py b/src/frostfs_testlib/storage/grpc_operations/interfaces.py
new file mode 100644
index 0000000..c39accc
--- /dev/null
+++ b/src/frostfs_testlib/storage/grpc_operations/interfaces.py
@@ -0,0 +1,285 @@
+from abc import ABC, abstractmethod
+from typing import Any, Optional
+
+from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
+from frostfs_testlib.shell.interfaces import CommandResult
+from frostfs_testlib.storage.cluster import Cluster, ClusterNode
+from frostfs_testlib.storage.constants import PlacementRule
+from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo
+from frostfs_testlib.utils import file_utils
+
+
+class ChunksInterface(ABC):
+    @abstractmethod
+    def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]:
+        pass
+
+    @abstractmethod
+    def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]:
+        pass
+
+    @abstractmethod
+    def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str:
+        pass
+
+    @abstractmethod
+    def get_all(
+        self,
+        rpc_endpoint: str,
+        cid: str,
+        oid: str,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        bearer: Optional[str] = None,
+        generate_key: Optional[bool] = None,
+        trace: bool = False,
+        root: bool = False,
+        verify_presence_all: bool = False,
+        json: bool = True,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+        timeout: Optional[str] = None,
+    ) -> list[Chunk]:
+        pass
+
+    @abstractmethod
+    def get_parity(
+        self,
+        rpc_endpoint: str,
+        cid: str,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        bearer: Optional[str] = None,
+        generate_key: Optional[bool] = None,
+        oid: Optional[str] = None,
+        trace: bool = False,
+        root: bool = False,
+        verify_presence_all: bool = False,
+        json: bool = True,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+        timeout: Optional[str] = None,
+    ) -> Chunk:
+        pass
+
+    @abstractmethod
+    def get_first_data(
+        self,
+        rpc_endpoint: str,
+        cid: str,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        bearer: Optional[str] = None,
+        generate_key: Optional[bool] = None,
+        oid: Optional[str] = None,
+        trace: bool = False,
+        root: bool = False,
+        verify_presence_all: bool = False,
+        json: bool = True,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+        timeout: Optional[str] = None,
+    ) -> Chunk:
+        pass
+
+
+class ObjectInterface(ABC):
+    def __init__(self) -> None:
+        self.chunks: ChunksInterface
+
+    @abstractmethod
+    def delete(
+        self,
+        cid: str,
+        oid: str,
+        endpoint: str,
+        bearer: str = "",
+        xhdr: Optional[dict] = None,
+        session: Optional[str] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> str:
+        pass
+
+    @abstractmethod
+    def get(
+        self,
+        cid: str,
+        oid: str,
+        endpoint: str,
+        bearer: Optional[str] = None,
+        write_object: Optional[str] = None,
+        xhdr: Optional[dict] = None,
+        no_progress: bool = True,
+        session: Optional[str] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> file_utils.TestFile:
+        pass
+
+    @abstractmethod
+    def get_from_random_node(
+        self,
+        cid: str,
+        oid: str,
+        cluster: Cluster,
+        bearer: Optional[str] = None,
+        write_object: Optional[str] = None,
+        xhdr: Optional[dict] = None,
+        no_progress: bool = True,
+        session: Optional[str] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> str:
+        pass
+
+    @abstractmethod
+    def hash(
+        self,
+        rpc_endpoint: str,
+        cid: str,
+        oid: str,
+        address: Optional[str] = None,
+        bearer: Optional[str] = None,
+        generate_key: Optional[bool] = None,
+        range: Optional[str] = None,
+        salt: Optional[str] = None,
+        ttl: Optional[int] = None,
+        session: Optional[str] = None,
+        hash_type: Optional[str] = None,
+        xhdr: Optional[dict] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> str:
+        pass
+
+    @abstractmethod
+    def head(
+        self,
+        cid: str,
+        oid: str,
+        endpoint: str,
+        bearer: str = "",
+        xhdr: Optional[dict] = None,
+        json_output: bool = True,
+        is_raw: bool = False,
+        is_direct: bool = False,
+        session: Optional[str] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> CommandResult | Any:
+        pass
+
+    @abstractmethod
+    def lock(
+        self,
+        cid: str,
+        oid: str,
+        endpoint: str,
+        lifetime: Optional[int] = None,
+        expire_at: Optional[int] = None,
+        address: Optional[str] = None,
+        bearer: Optional[str] = None,
+        session: Optional[str] = None,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> str:
+        pass
+
+    @abstractmethod
+    def put(
+        self,
+        path: str,
+        cid: str,
+        endpoint: str,
+        bearer: Optional[str] = None,
+        copies_number: Optional[int] = None,
+        attributes: Optional[dict] = None,
+        xhdr: Optional[dict] = None,
+        expire_at: Optional[int] = None,
+        no_progress: bool = True,
+        session: Optional[str] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> str:
+        pass
+
+    @abstractmethod
+    def put_to_random_node(
+        self,
+        path: str,
+        cid: str,
+        cluster: Cluster,
+        bearer: Optional[str] = None,
+        copies_number: Optional[int] = None,
+        attributes: Optional[dict] = None,
+        xhdr: Optional[dict] = None,
+        expire_at: Optional[int] = None,
+        no_progress: bool = True,
+        session: Optional[str] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> str:
+        pass
+
+    @abstractmethod
+    def range(
+        self,
+        cid: str,
+        oid: str,
+        range_cut: str,
+        endpoint: str,
+        bearer: str = "",
+        xhdr: Optional[dict] = None,
+        session: Optional[str] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> tuple[file_utils.TestFile, bytes]:
+        pass
+
+    @abstractmethod
+    def search(
+        self,
+        cid: str,
+        endpoint: str,
+        bearer: str = "",
+        filters: Optional[dict] = None,
+        expected_objects_list: Optional[list] = None,
+        xhdr: Optional[dict] = None,
+        session: Optional[str] = None,
+        phy: bool = False,
+        root: bool = False,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> list:
+        pass
+
+    @abstractmethod
+    def nodes(
+        self,
+        cluster: Cluster,
+        cid: str,
+        oid: str,
+        alive_node: ClusterNode,
+        bearer: str = "",
+        xhdr: Optional[dict] = None,
+        is_direct: bool = False,
+        verify_presence_all: bool = False,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> list[ClusterNode]:
+        pass
+
+
+class ContainerInterface(ABC):
+    @abstractmethod
+    def create(
+        self,
+        endpoint: str,
+        rule: str = PlacementRule.DEFAULT_PLACEMENT_RULE,
+        basic_acl: str = "",
+        attributes: Optional[dict] = None,
+        session_token: str = "",
+        name: Optional[str] = None,
+        options: Optional[dict] = None,
+        await_mode: bool = True,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> str:
+        pass
+
+
+class GrpcClientWrapper(ABC):
+    def __init__(self) -> None:
+        self.object: ObjectInterface
+        self.container: ContainerInterface

From 0caca54e36751d3b9f9b4feeaae07c396ff656a9 Mon Sep 17 00:00:00 2001
From: "a.berezin"
Date: Fri, 16 Aug 2024 18:12:25 +0300
Subject: [PATCH 193/274] [#283] Fix mistakes

Signed-off-by: a.berezin
---
 src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py    | 2 +-
 src/frostfs_testlib/storage/grpc_operations/implementations/container.py | 2 +-
 src/frostfs_testlib/storage/grpc_operations/implementations/object.py    | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py
index 70d0823..b0f196e 100644
--- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py
+++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py
@@ -7,7 +7,7 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher
 from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo
-from frostfs_testlib.storage.grps_operations import interfaces
+from frostfs_testlib.storage.grpc_operations import interfaces
 from frostfs_testlib.utils.cli_utils import parse_netmap_output
 
 
diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py
index 077bdfd..cac2df4 100644
--- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py
+++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py
@@ -5,7 +5,7 @@ from frostfs_testlib import reporter
 from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
 from frostfs_testlib.storage.constants import PlacementRule
-from frostfs_testlib.storage.grps_operations import interfaces
+from frostfs_testlib.storage.grpc_operations import interfaces
 
 logger = logging.getLogger("NeoLogger")
 
diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py
index a967853..63a2922 100644
--- a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py
+++ b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py
@@ -11,8 +11,8 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
 from frostfs_testlib.resources.common import ASSETS_DIR
 from frostfs_testlib.shell.interfaces import CommandResult
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
-from frostfs_testlib.storage.grps_operations import interfaces
-from frostfs_testlib.storage.grps_operations.implementations.chunks import ChunksOperations
+from frostfs_testlib.storage.grpc_operations import interfaces
+from frostfs_testlib.storage.grpc_operations.implementations.chunks import ChunksOperations
 from frostfs_testlib.testing.test_control import wait_for_success
 from frostfs_testlib.utils import cli_utils, file_utils
 

From 85c2707ec807a4220504a40bcf1655e2aefe4869 Mon Sep 17 00:00:00 2001
From: Dmitriy Zayakin
Date: Wed, 28 Aug 2024 12:12:05 +0300
Subject: [PATCH 194/274] [#284] Add container operational in CliWrapper

Signed-off-by: Dmitriy Zayakin
---
 .../cli/frostfs_cli/container.py          |  20 ++
 .../implementations/container.py          | 247 ++++++++++++++++--
 .../storage/grpc_operations/interfaces.py | 148 +++++++++--
 3 files changed, 377 insertions(+), 38 deletions(-)

diff --git a/src/frostfs_testlib/cli/frostfs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py
index 1ff217f..8bcbe9e 100644
--- a/src/frostfs_testlib/cli/frostfs_cli/container.py
+++ b/src/frostfs_testlib/cli/frostfs_cli/container.py
@@ -16,6 +16,8 @@ class FrostfsCliContainer(CliCommand):
         basic_acl: Optional[str] = None,
         await_mode: bool = False,
         disable_timestamp: bool = False,
+        force: bool = False,
+        trace: bool = False,
         name: Optional[str] = None,
         nonce: Optional[str] = None,
         policy: Optional[str] = None,
@@ -37,6 +39,8 @@ class FrostfsCliContainer(CliCommand):
             basic_acl: Hex encoded basic ACL value or keywords like 'public-read-write',
                 'private', 'eacl-public-read' (default "private").
             disable_timestamp: Disable timestamp container attribute.
+            force: Skip placement validity check.
+            trace: Generate trace ID and print it.
             name: Container name attribute.
             nonce: UUIDv4 nonce value for container.
             policy: QL-encoded or JSON-encoded placement policy or path to file with it.
@@ -69,6 +73,7 @@ class FrostfsCliContainer(CliCommand):
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
         force: bool = False,
+        trace: bool = False,
     ) -> CommandResult:
         """
         Delete an existing container.
@@ -78,6 +83,7 @@ class FrostfsCliContainer(CliCommand):
             address: Address of wallet account.
             await_mode: Block execution until container is removed.
             cid: Container ID.
+            trace: Generate trace ID and print it.
            force: Do not check whether container contains locks and remove immediately.
             rpc_endpoint: Remote node address (as 'multiaddr' or ':').
             session: Path to a JSON-encoded container session token.
@@ -104,6 +110,7 @@ class FrostfsCliContainer(CliCommand):
         await_mode: bool = False,
         to: Optional[str] = None,
         json_mode: bool = False,
+        trace: bool = False,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
@@ -116,6 +123,7 @@ class FrostfsCliContainer(CliCommand):
             await_mode: Block execution until container is removed.
             cid: Container ID.
             json_mode: Print or dump container in JSON format.
+            trace: Generate trace ID and print it.
             rpc_endpoint: Remote node address (as 'multiaddr' or ':').
             to: Path to dump encoded container.
             ttl: TTL value in request meta header (default 2).
@@ -155,6 +163,8 @@ class FrostfsCliContainer(CliCommand):
             cid: Container ID.
             rpc_endpoint: Remote node address (as 'multiaddr' or ':').
             to: Path to dump encoded container.
+            json_mode: Print or dump container in JSON format.
+            trace: Generate trace ID and print it.
             session: Path to a JSON-encoded container session token.
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
@@ -174,6 +184,7 @@ class FrostfsCliContainer(CliCommand):
     def list(
         self,
         rpc_endpoint: str,
+        name: Optional[str] = None,
        wallet: Optional[str] = None,
         address: Optional[str] = None,
         generate_key: Optional[bool] = None,
@@ -188,11 +199,13 @@ class FrostfsCliContainer(CliCommand):
 
         Args:
             address: Address of wallet account.
+            name: List containers by the attribute name.
             owner: Owner of containers (omit to use owner from private key).
             rpc_endpoint: Remote node address (as 'multiaddr' or ':').
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
+            trace: Generate trace ID and print it.
             timeout: Timeout for the operation (default 15s).
 
             generate_key: Generate a new private key.
@@ -208,9 +221,11 @@ class FrostfsCliContainer(CliCommand):
         self,
         rpc_endpoint: str,
         cid: str,
+        bearer: Optional[str] = None,
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         generate_key: Optional[bool] = None,
+        trace: bool = False,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
@@ -221,10 +236,12 @@ class FrostfsCliContainer(CliCommand):
         Args:
             address: Address of wallet account.
             cid: Container ID.
+            bearer: File with signed JSON or binary encoded bearer token.
             rpc_endpoint: Remote node address (as 'multiaddr' or ':').
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
+            trace: Generate trace ID and print it.
             timeout: Timeout for the operation (default 15s).
 
             generate_key: Generate a new private key.
@@ -236,6 +253,7 @@ class FrostfsCliContainer(CliCommand):
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
 
+    # TODO Deprecated method with 0.42
     def set_eacl(
         self,
         rpc_endpoint: str,
@@ -281,6 +299,7 @@ class FrostfsCliContainer(CliCommand):
         address: Optional[str] = None,
         ttl: Optional[int] = None,
         from_file: Optional[str] = None,
+        trace: bool = False,
         short: Optional[bool] = True,
         xhdr: Optional[dict] = None,
         generate_key: Optional[bool] = None,
@@ -298,6 +317,7 @@ class FrostfsCliContainer(CliCommand):
             from_file: string File path with encoded container
             timeout: duration Timeout for the operation (default 15 s)
             short: shorten the output of node information.
+            trace: Generate trace ID and print it.
             xhdr: Dict with request X-Headers.
             generate_key: Generate a new private key.
 
diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py
index cac2df4..c8360ea 100644
--- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py
+++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py
@@ -1,11 +1,16 @@
+import json
 import logging
-from typing import Optional
+import re
+from typing import List, Optional, Union
 
 from frostfs_testlib import reporter
 from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
+from frostfs_testlib.plugins import load_plugin
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
-from frostfs_testlib.storage.constants import PlacementRule
+from frostfs_testlib.s3.interfaces import BucketContainerResolver
+from frostfs_testlib.storage.cluster import ClusterNode
 from frostfs_testlib.storage.grpc_operations import interfaces
+from frostfs_testlib.utils import json_utils
 
 logger = logging.getLogger("NeoLogger")
 
@@ -18,13 +23,22 @@ class ContainerOperations(interfaces.ContainerInterface):
     def create(
         self,
         endpoint: str,
-        rule: str = PlacementRule.DEFAULT_PLACEMENT_RULE,
-        basic_acl: str = "",
+        nns_zone: Optional[str] = None,
+        nns_name: Optional[str] = None,
+        address: Optional[str] = None,
         attributes: Optional[dict] = None,
-        session_token: str = "",
+        basic_acl: Optional[str] = None,
+        await_mode: bool = False,
+        disable_timestamp: bool = False,
+        force: bool = False,
+        trace: bool = False,
         name: Optional[str] = None,
-        options: Optional[dict] = None,
-        await_mode: bool = True,
+        nonce: Optional[str] = None,
+        policy: Optional[str] = None,
+        session: Optional[str] = None,
+        subnet: Optional[str] = None,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
         timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
     ) -> str:
         """
@@ -54,14 +68,23 @@ class ContainerOperations(interfaces.ContainerInterface):
         """
         result = self.cli.container.create(
             rpc_endpoint=endpoint,
-            policy=rule,
-            basic_acl=basic_acl,
+            policy=policy,
+            nns_zone=nns_zone,
+            nns_name=nns_name,
+            address=address,
             attributes=attributes,
-            name=name,
-            session=session_token,
+            basic_acl=basic_acl,
             await_mode=await_mode,
+            disable_timestamp=disable_timestamp,
+            force=force,
+            trace=trace,
+            name=name,
+            nonce=nonce,
+            session=session,
+            subnet=subnet,
+            ttl=ttl,
+            xhdr=xhdr,
             timeout=timeout,
-            **options or {},
         )
 
         cid = self._parse_cid(result.stdout)
@@ -71,21 +94,215 @@ class ContainerOperations(interfaces.ContainerInterface):
         return cid
 
     @reporter.step("List Containers")
-    def list(self, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT) -> list[str]:
+    def list(
+        self,
+        endpoint: str,
+        name: Optional[str] = None,
+        address: Optional[str] = None,
+        generate_key: Optional[bool] = None,
+        owner: Optional[str] = None,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+        **params,
+    ) -> List[str]:
         """
         A wrapper for `frostfs-cli container list` call. It returns all the
         available containers for the given wallet.
         Args:
-            wallet (WalletInfo): a wallet on whose behalf we list the containers
             shell: executor for cli command
             endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
             timeout: Timeout for the operation.
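+
+        Example:
+            A minimal call sketch (`client` stands for any GrpcClientWrapper
+            implementation; all other parameters are optional):
+                containers = client.container.list(endpoint)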
         Returns:
             (list): list of containers
         """
-        result = self.cli.container.list(rpc_endpoint=endpoint, timeout=timeout)
+        result = self.cli.container.list(
+            rpc_endpoint=endpoint,
+            name=name,
+            address=address,
+            generate_key=generate_key,
+            owner=owner,
+            ttl=ttl,
+            xhdr=xhdr,
+            timeout=timeout,
+            **params,
+        )
         return result.stdout.split()
 
+    @reporter.step("List Objects in container")
+    def list_objects(
+        self,
+        endpoint: str,
+        cid: str,
+        bearer: Optional[str] = None,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        generate_key: Optional[bool] = None,
+        trace: bool = False,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> List[str]:
+        """
+        A wrapper for `frostfs-cli container list-objects` call. It returns all the
+        available objects in the container.
+        Args:
+            cid: ID of the container
+            endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
+            timeout: Timeout for the operation.
+        Returns:
+            (list): list of objects
+        """
+        result = self.cli.container.list_objects(
+            rpc_endpoint=endpoint,
+            cid=cid,
+            bearer=bearer,
+            wallet=wallet,
+            address=address,
+            generate_key=generate_key,
+            trace=trace,
+            ttl=ttl,
+            xhdr=xhdr,
+            timeout=timeout,
+        )
+        logger.info(f"Container objects: \n{result}")
+        return result.stdout.split()
+
+    @reporter.step("Delete container")
+    def delete(
+        self,
+        endpoint: str,
+        cid: str,
+        address: Optional[str] = None,
+        await_mode: bool = False,
+        session: Optional[str] = None,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+        force: bool = False,
+        trace: bool = False,
+    ):
+        try:
+            return self.cli.container.delete(
+                rpc_endpoint=endpoint,
+                cid=cid,
+                address=address,
+                await_mode=await_mode,
+                session=session,
+                ttl=ttl,
+                xhdr=xhdr,
+                force=force,
+                trace=trace,
+            ).stdout
+        except RuntimeError as e:
+            print(f"Error request:\n{e}")
+
+    @reporter.step("Get container")
+    def get(
+        self,
+        endpoint: str,
+        cid: str,
+        address: Optional[str] = None,
+        generate_key: Optional[bool] = None,
+        await_mode: bool = False,
+        to: Optional[str] = None,
+        json_mode: bool = True,
+        trace: bool = False,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> Union[dict, str]:
+        result = self.cli.container.get(
+            rpc_endpoint=endpoint,
+            cid=cid,
+            address=address,
+            generate_key=generate_key,
+            await_mode=await_mode,
+            to=to,
+            json_mode=json_mode,
+            trace=trace,
+            ttl=ttl,
+            xhdr=xhdr,
+            timeout=timeout,
+        )
+        container_info = json.loads(result.stdout)
+        attributes = dict()
+        for attr in container_info["attributes"]:
+            attributes[attr["key"]] = attr["value"]
+        container_info["attributes"] = attributes
+        container_info["ownerID"] = json_utils.json_reencode(container_info["ownerID"]["value"])
+        return container_info
+
+    @reporter.step("Get container eacl")
+    def get_eacl(
+        self,
+        endpoint: str,
+        cid: str,
+        address: Optional[str] = None,
+        generate_key: Optional[bool] = None,
+        await_mode: bool = False,
+        json_mode: bool = True,
+        trace: bool = False,
+        to: Optional[str] = None,
+        session: Optional[str] = None,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ):
+        return self.cli.container.get_eacl(
+            rpc_endpoint=endpoint,
+            cid=cid,
+            address=address,
+            generate_key=generate_key,
+            await_mode=await_mode,
+            to=to,
+            session=session,
+            ttl=ttl,
+            xhdr=xhdr,
+            timeout=CLI_DEFAULT_TIMEOUT,
+        ).stdout
+
+    @reporter.step("Get container nodes")
+    def nodes(
+        self,
+        endpoint: str,
+        cid: str,
+        address: Optional[str] = None,
+        ttl: Optional[int] = None,
+        from_file: Optional[str] = None,
+        trace: bool = False,
+        short: Optional[bool] = True,
+        xhdr: Optional[dict] = None,
+        generate_key: Optional[bool] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> List[ClusterNode]:
+        result = self.cli.container.search_node(
+            rpc_endpoint=endpoint,
+            cid=cid,
+            address=address,
+            ttl=ttl,
+            from_file=from_file,
+            trace=trace,
+            short=short,
+            xhdr=xhdr,
+            generate_key=generate_key,
+            timeout=timeout,
+        ).stdout
+
+        pattern = r"[0-9]+(?:\.[0-9]+){3}"
+        nodes_ip = list(set(re.findall(pattern, result)))
+
+        with reporter.step(f"nodes ips = {nodes_ip}"):
+            nodes_list = cluster.get_nodes_by_ip(nodes_ip)
+
+        with reporter.step(f"Return nodes - {nodes_list}"):
+            return nodes_list
+
+    @reporter.step("Resolve container by name")
+    def resolve_container_by_name(name: str, node: ClusterNode):
+        resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", node.host.config.product)
+        resolver: BucketContainerResolver = resolver_cls()
+        return resolver.resolve(node, name)
+
     def _parse_cid(self, output: str) -> str:
         """
         Parses container ID from a given CLI output. The input string we expect:
diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces.py b/src/frostfs_testlib/storage/grpc_operations/interfaces.py
index c39accc..1947435 100644
--- a/src/frostfs_testlib/storage/grpc_operations/interfaces.py
+++ b/src/frostfs_testlib/storage/grpc_operations/interfaces.py
@@ -1,7 +1,6 @@
 from abc import ABC, abstractmethod
-from typing import Any, Optional
+from typing import Any, List, Optional
 
-from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
 from frostfs_testlib.shell.interfaces import CommandResult
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.constants import PlacementRule
@@ -96,7 +95,7 @@ class ObjectInterface(ABC):
         bearer: str = "",
         xhdr: Optional[dict] = None,
         session: Optional[str] = None,
-        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+        timeout: Optional[str] = None,
     ) -> str:
         pass
 
@@ -111,7 +110,7 @@ class ObjectInterface(ABC):
         xhdr: Optional[dict] = None,
         no_progress: bool = True,
         session: Optional[str] = None,
-        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+        timeout: Optional[str] = None,
     ) -> file_utils.TestFile:
         pass
 
@@ -126,14 +125,14 @@ class ObjectInterface(ABC):
         xhdr: Optional[dict] = None,
         no_progress: bool = True,
         session: Optional[str] = None,
-        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+        timeout: Optional[str] = None,
     ) -> str:
         pass
 
     @abstractmethod
     def hash(
         self,
-        rpc_endpoint: str,
+        endpoint: str,
         cid: str,
         oid: str,
         address: Optional[str] = None,
@@ -145,7 +144,7 @@ class ObjectInterface(ABC):
         session: Optional[str] = None,
         hash_type: Optional[str] = None,
         xhdr: Optional[dict] = None,
-        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+        timeout: Optional[str] = None,
     ) -> str:
         pass
 
@@ -161,7 +160,7 @@ class ObjectInterface(ABC):
         is_raw: bool = False,
         is_direct: bool = False,
         session: Optional[str] = None,
-        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+        timeout: Optional[str] = None,
     ) -> CommandResult | Any:
         pass
 
@@ -178,7 +177,7 @@ class ObjectInterface(ABC):
         session: Optional[str] = None,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
-        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+        timeout: Optional[str] = None,
     ) -> str:
         pass
 
@@ -195,7 +194,7 @@ class ObjectInterface(ABC):
         expire_at: Optional[int] = None,
         no_progress: bool = True,
         session: Optional[str] = None,
-        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+        timeout: Optional[str] = None,
     ) -> str:
         pass
 
@@ -212,7 +211,7 @@ class ObjectInterface(ABC):
         expire_at: Optional[int] = None,
         no_progress: bool = True,
         session: Optional[str] = None,
-        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+        timeout: Optional[str] = None,
     ) -> str:
         pass
 
@@ -226,7 +225,7 @@ class ObjectInterface(ABC):
         bearer: str = "",
         xhdr: Optional[dict] = None,
         session: Optional[str] = None,
-        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+        timeout: Optional[str] = None,
     ) -> tuple[file_utils.TestFile, bytes]:
         pass
 
@@ -242,8 +241,8 @@ class ObjectInterface(ABC):
         session: Optional[str] = None,
         phy: bool = False,
         root: bool = False,
-        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
-    ) -> list:
+        timeout: Optional[str] = None,
+    ) -> List:
         pass
 
     @abstractmethod
@@ -257,8 +256,8 @@ class ObjectInterface(ABC):
         xhdr: Optional[dict] = None,
         is_direct: bool = False,
         verify_presence_all: bool = False,
-        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
-    ) -> list[ClusterNode]:
+        timeout: Optional[str] = None,
+    ) -> List[ClusterNode]:
         pass
 
 
@@ -267,16 +266,119 @@ class ContainerInterface(ABC):
     def create(
         self,
         endpoint: str,
-        rule: str = PlacementRule.DEFAULT_PLACEMENT_RULE,
-        basic_acl: str = "",
+        nns_zone: Optional[str] = None,
+        nns_name: Optional[str] = None,
+        address: Optional[str] = None,
         attributes: Optional[dict] = None,
-        session_token: str = "",
+        basic_acl: Optional[str] = None,
+        await_mode: bool = False,
+        disable_timestamp: bool = False,
+        force: bool = False,
+        trace: bool = False,
         name: Optional[str] = None,
-        options: Optional[dict] = None,
-        await_mode: bool = True,
-        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+        nonce: Optional[str] = None,
+        policy: Optional[str] = None,
+        session: Optional[str] = None,
+        subnet: Optional[str] = None,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+        timeout: Optional[str] = None,
    ) -> str:
-        pass
+        """
+        Create a new container and register it in the FrostFS.
+        It will be stored in the sidechain when the Inner Ring accepts it.
+        """
+        raise NotImplementedError("No implemented method create")
+
+    @abstractmethod
+    def delete(
+        self,
+        endpoint: str,
+        cid: str,
+        address: Optional[str] = None,
+        await_mode: bool = False,
+        session: Optional[str] = None,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+        force: bool = False,
+        trace: bool = False,
+    ) -> List[str]:
+        """
+        Delete an existing container.
+        Only the owner of the container has permission to remove the container.
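+
+        A minimal call sketch (`client` is any GrpcClientWrapper implementation;
+        the names are illustrative, not a fixed API):
+            client.container.delete(endpoint, cid, await_mode=True)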
+ """ + raise NotImplementedError("No implemethed method delete") + + @abstractmethod + def get( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + to: Optional[str] = None, + json_mode: bool = True, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get container field info.""" + raise NotImplementedError("No implemethed method get") + + @abstractmethod + def get_eacl( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + json_mode: bool = True, + trace: bool = False, + to: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get extended ACL table of container.""" + raise NotImplementedError("No implemethed method get-eacl") + + @abstractmethod + def list( + self, + endpoint: str, + name: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + owner: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + **params, + ) -> List[str]: + """List all created containers.""" + raise NotImplementedError("No implemethed method list") + + @abstractmethod + def nodes( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + ttl: Optional[int] = None, + from_file: Optional[str] = None, + trace: bool = False, + short: Optional[bool] = True, + xhdr: Optional[dict] = None, + generate_key: Optional[bool] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Show the nodes participating in the container in the current epoch.""" + raise NotImplementedError("No implemethed method nodes") class GrpcClientWrapper(ABC): From eba782e7d26945d75bb1e233b16058e3b1b52f7d Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 2 Sep 2024 13:30:01 +0300 Subject: [PATCH 195/274] [#285] Change func search bucket nodes and remove old resolver bucket cnr Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/cli/container.py | 7 ------- src/frostfs_testlib/steps/s3/s3_helper.py | 6 ++++-- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index 641b321..809b39a 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -327,13 +327,6 @@ def _parse_cid(output: str) -> str: return splitted[1] -@reporter.step("Search container by name") -def search_container_by_name(name: str, node: ClusterNode): - resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", node.host.config.product) - resolver: BucketContainerResolver = resolver_cls() - return resolver.resolve(node, name) - - @reporter.step("Search for nodes with a container") def search_nodes_with_container( wallet: WalletInfo, diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index 9b85766..dbf48d3 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -7,8 +7,9 @@ from dateutil.parser import parse from frostfs_testlib import reporter from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus +from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.shell 
import Shell -from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container +from frostfs_testlib.steps.cli.container import search_nodes_with_container from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo @@ -175,10 +176,11 @@ def search_nodes_with_bucket( wallet: WalletInfo, shell: Shell, endpoint: str, + bucket_container_resolver: BucketContainerResolver, ) -> list[ClusterNode]: cid = None for cluster_node in cluster.cluster_nodes: - cid = search_container_by_name(name=bucket_name, node=cluster_node) + cid = bucket_container_resolver.resolve(cluster_node, bucket_name) if cid: break nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) From d2f8323fb95c547ae35b984744b1ef63ce502dba Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 3 Sep 2024 15:11:43 +0300 Subject: [PATCH 196/274] [#286] Change args id in shards.set-mode command Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/frostfs_cli/shards.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py index e88707a..82ea87b 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -40,7 +40,7 @@ class FrostfsCliShards(CliCommand): self, endpoint: str, mode: str, - id: Optional[list[str]], + id: Optional[list[str]] = None, wallet: Optional[str] = None, wallet_password: Optional[str] = None, address: Optional[str] = None, From 84e83487f9896cc1e95c64680bf7664724a4c59c Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 10 Sep 2024 13:54:51 +0300 Subject: [PATCH 197/274] [#288] Update object and chunks Clients Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/frostfs_cli/object.py | 2 +- .../grpc_operations/implementations/chunks.py | 63 +++++++++++++++---- 2 files changed, 52 insertions(+), 13 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 070def0..1857987 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -370,11 +370,11 @@ class FrostfsCliObject(CliCommand): self, rpc_endpoint: str, cid: str, + oid: Optional[str] = None, wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, generate_key: Optional[bool] = None, - oid: Optional[str] = None, trace: bool = False, root: bool = False, verify_presence_all: bool = False, diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py index b0f196e..d1bba9f 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -60,7 +60,6 @@ class ChunksOperations(interfaces.ChunksInterface): rpc_endpoint: str, cid: str, oid: str, - wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, generate_key: Optional[bool] = None, @@ -72,15 +71,28 @@ class ChunksOperations(interfaces.ChunksInterface): xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> list[Chunk]: - object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]}) - return self._parse_object_nodes(object_nodes.stdout) + 
object_nodes = self.cli.object.nodes( + rpc_endpoint=rpc_endpoint, + cid=cid, + address=address, + bearer=bearer, + generate_key=generate_key, + oid=oid, + trace=trace, + root=root, + verify_presence_all=verify_presence_all, + json=json, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0] @reporter.step("Get last parity chunk") def get_parity( self, rpc_endpoint: str, cid: str, - wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, generate_key: Optional[bool] = None, @@ -93,29 +105,56 @@ class ChunksOperations(interfaces.ChunksInterface): xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> Chunk: - object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]}) - return self._parse_object_nodes(object_nodes.stdout)[-1] + object_nodes = self.cli.object.nodes( + rpc_endpoint=rpc_endpoint, + cid=cid, + address=address, + bearer=bearer, + generate_key=generate_key, + oid=oid, + trace=trace, + root=root, + verify_presence_all=verify_presence_all, + json=json, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0] @reporter.step("Get first data chunk") def get_first_data( self, rpc_endpoint: str, cid: str, - wallet: Optional[str] = None, + oid: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = False, + trace: bool = True, root: bool = False, verify_presence_all: bool = False, json: bool = True, ttl: Optional[int] = None, xhdr: Optional[dict] = None, - timeout: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> Chunk: - object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]}) - return self._parse_object_nodes(object_nodes.stdout)[0] + object_nodes = self.cli.object.nodes( + rpc_endpoint=rpc_endpoint, + cid=cid, + address=address, + bearer=bearer, + generate_key=generate_key, + oid=oid, + trace=trace, + root=root, + verify_presence_all=verify_presence_all, + json=json, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0] def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]: parse_result = json.loads(object_nodes) From 565fd4c72b6ab562f3024d471ff0aad5f2f42514 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Tue, 10 Sep 2024 15:14:32 +0300 Subject: [PATCH 198/274] [#289] Move temp dir fixture to testlib Signed-off-by: a.berezin --- src/frostfs_testlib/__init__.py | 2 +- src/frostfs_testlib/fixtures.py | 12 +++++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index 2cdaf4e..f3143e6 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1,3 +1,3 @@ __version__ = "2.0.1" -from .fixtures import configure_testlib, hosting +from .fixtures import configure_testlib, hosting, temp_directory diff --git a/src/frostfs_testlib/fixtures.py b/src/frostfs_testlib/fixtures.py index 8f6873f..d0f92f2 100644 --- a/src/frostfs_testlib/fixtures.py +++ b/src/frostfs_testlib/fixtures.py @@ -7,7 +7,7 @@ import yaml from frostfs_testlib import reporter from frostfs_testlib.hosting.hosting import Hosting -from frostfs_testlib.resources.common import HOSTING_CONFIG_FILE +from 
frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE from frostfs_testlib.storage import get_service_registry @@ -24,6 +24,16 @@ def configure_testlib(): registry.register_service(svc.name, svc.load()) +@pytest.fixture(scope="session") +def temp_directory(configure_testlib): + with reporter.step("Prepare tmp directory"): + full_path = ASSETS_DIR + if not os.path.exists(full_path): + os.mkdir(full_path) + + return full_path + + @pytest.fixture(scope="session") def hosting(configure_testlib) -> Hosting: with open(HOSTING_CONFIG_FILE, "r") as file: From 36bfe385d59f9ddb69593d1095e8d15c0d1c4e0d Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Mon, 9 Sep 2024 20:44:31 +0300 Subject: [PATCH 199/274] Added method get s3 endpoint for namespace --- src/frostfs_testlib/storage/constants.py | 1 + src/frostfs_testlib/storage/dataclasses/frostfs_services.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 84f8d24..2cffd3a 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -12,6 +12,7 @@ class ConfigAttributes: REMOTE_WALLET_CONFIG = "remote_wallet_config_path" ENDPOINT_DATA_0 = "endpoint_data0" ENDPOINT_DATA_1 = "endpoint_data1" + ENDPOINT_DATA_0_NS = "endpoint_data0_namespace" ENDPOINT_INTERNAL = "endpoint_internal0" ENDPOINT_PROMETHEUS = "endpoint_prometheus" CONTROL_ENDPOINT = "control_endpoint" diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 16efd72..1420356 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -45,6 +45,9 @@ class S3Gate(NodeBase): self._get_attribute(ConfigAttributes.ENDPOINT_DATA_1), ] + def get_ns_endpoint(self, ns_name: str) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0_NS).format(namespace=ns_name) + def service_healthcheck(self) -> bool: health_metric = "frostfs_s3_gw_state_health" output = self.host.get_shell().exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d").stdout From 4a2ac8a9b6ed8fe37c25bff91422f2d4232d2ab3 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Wed, 11 Sep 2024 10:42:51 +0300 Subject: [PATCH 200/274] [#290] Update restore traffic method Signed-off-by: Dmitriy Zayakin --- .../storage/controllers/cluster_state_controller.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 5d87a60..7f93e40 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -325,6 +325,8 @@ class ClusterStateController: node: ClusterNode, ) -> None: IpHelper.restore_input_traffic_to_node(node=node) + index = self.dropped_traffic.index(node) + self.dropped_traffic.pop(index) @reporter.step("Restore blocked nodes") def restore_all_traffic(self): @@ -531,7 +533,7 @@ class ClusterStateController: except Exception as err: logger.warning(f"Host ping fails with error {err}") return HostStatus.ONLINE - + @reporter.step("Get contract by domain - {domain_name}") def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str): frostfs_adm = FrostfsAdm( From 1bee69042b1982f5167bfbef9e7b01a768452688 Mon Sep 17 00:00:00 2001 From: "m.malygina" 
Date: Tue, 10 Sep 2024 10:45:22 +0300
Subject: [PATCH 201/274] [#294] add wipe data using wipefs method

Signed-off-by: m.malygina
---
 src/frostfs_testlib/hosting/docker_host.py |  8 +++++++-
 src/frostfs_testlib/hosting/interfaces.py  | 19 +++++++++++++++++--
 2 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py
index 0fb5af0..5110e63 100644
--- a/src/frostfs_testlib/hosting/docker_host.py
+++ b/src/frostfs_testlib/hosting/docker_host.py
@@ -185,6 +185,12 @@ class DockerHost(Host):
     def is_file_exist(self, file_path: str) -> None:
         raise NotImplementedError("Not implemented for docker")
 
+    def wipefs_storage_node_data(self, service_name: str) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
+    def finish_wipefs(self, service_name: str) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
     def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None:
         volume_path = self.get_data_directory(service_name)
 
@@ -240,7 +246,7 @@ class DockerHost(Host):
         until: Optional[datetime] = None,
         unit: Optional[str] = None,
         exclude_filter: Optional[str] = None,
-        priority: Optional[str] = None
+        priority: Optional[str] = None,
     ) -> str:
         client = self._get_docker_client()
         filtered_logs = ""
diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py
index 36c2804..b84326a 100644
--- a/src/frostfs_testlib/hosting/interfaces.py
+++ b/src/frostfs_testlib/hosting/interfaces.py
@@ -178,6 +178,21 @@ class Host(ABC):
             cache_only: To delete cache only.
         """
 
+    @abstractmethod
+    def wipefs_storage_node_data(self, service_name: str) -> None:
+        """Erases all data of the storage node with specified name.
+
+        Args:
+            service_name: Name of storage node service.
+        """
+
+    def finish_wipefs(self, service_name: str) -> None:
+        """Finishes the wipefs process for the storage node with specified name.
+
+        Args:
+            service_name: Name of storage node service.
+        """
+
     @abstractmethod
     def delete_fstree(self, service_name: str) -> None:
         """
@@ -297,7 +312,7 @@ class Host(ABC):
         until: Optional[datetime] = None,
         unit: Optional[str] = None,
         exclude_filter: Optional[str] = None,
-        priority: Optional[str] = None
+        priority: Optional[str] = None,
     ) -> str:
         """Get logs from host filtered by regex.
 
@@ -306,7 +321,7 @@ class Host(ABC):
             since: If set, limits the time from which logs should be collected. Must be in UTC.
             until: If set, limits the time until which logs should be collected. Must be in UTC.
             unit: required unit.
-            priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher.
+            priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher. For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0.
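+                An illustrative call (the unit name is an example, not a fixed value):
+                `host.get_filtered_logs("error", unit="frostfs-storage", priority="3")`.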
         Returns:

From 0d750ed114653c05f810d35b0ab05d1104af40c2 Mon Sep 17 00:00:00 2001
From: Dmitriy Zayakin
Date: Tue, 17 Sep 2024 07:52:32 +0300
Subject: [PATCH 202/274] [#293] Add in CSC methods change blockchain netmap and update CliWrapper

Signed-off-by: Dmitriy Zayakin
---
 src/frostfs_testlib/steps/node_management.py  | 40 +++++----------
 .../controllers/cluster_state_controller.py   | 49 ++++++++++---------
 .../dataclasses/storage_object_info.py        |  3 ++
 .../grpc_operations/implementations/chunks.py | 10 ++--
 .../implementations/container.py              |  3 +-
 .../grpc_operations/implementations/object.py |  8 +++
 .../storage/grpc_operations/interfaces.py     |  7 ++-
 7 files changed, 63 insertions(+), 57 deletions(-)

diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py
index ece674b..42b1fc5 100644
--- a/src/frostfs_testlib/steps/node_management.py
+++ b/src/frostfs_testlib/steps/node_management.py
@@ -13,6 +13,7 @@ from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align
 from frostfs_testlib.storage.cluster import Cluster, StorageNode
+from frostfs_testlib.testing.test_control import wait_for_success
 from frostfs_testlib.utils import datetime_utils
 
 logger = logging.getLogger("NeoLogger")
@@ -111,10 +112,7 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
     storage_wallet_path = node.get_wallet_path()
 
     cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config)
-    return cli.netmap.snapshot(
-        rpc_endpoint=node.get_rpc_endpoint(),
-        wallet=storage_wallet_path,
-    ).stdout
+    return cli.netmap.snapshot(rpc_endpoint=node.get_rpc_endpoint(), wallet=storage_wallet_path).stdout
 
 
 @reporter.step("Get shard list for {node}")
@@ -202,12 +200,7 @@ def delete_node_data(node: StorageNode) -> None:
 
 
 @reporter.step("Exclude node {node_to_exclude} from network map")
-def exclude_node_from_network_map(
-    node_to_exclude: StorageNode,
-    alive_node: StorageNode,
-    shell: Shell,
-    cluster: Cluster,
-) -> None:
+def exclude_node_from_network_map(node_to_exclude: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None:
     node_netmap_key = node_to_exclude.get_wallet_public_key()
 
     storage_node_set_status(node_to_exclude, status="offline")
@@ -221,12 +214,7 @@ def exclude_node_from_network_map(
 
 
 @reporter.step("Include node {node_to_include} into network map")
-def include_node_to_network_map(
-    node_to_include: StorageNode,
-    alive_node: StorageNode,
-    shell: Shell,
-    cluster: Cluster,
-) -> None:
+def include_node_to_network_map(node_to_include: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None:
     storage_node_set_status(node_to_include, status="online")
 
     # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch.
@@ -236,7 +224,7 @@ def include_node_to_network_map(
     tick_epoch(shell, cluster)
     time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
 
-    check_node_in_map(node_to_include, shell, alive_node)
+    await_node_in_map(node_to_include, shell, alive_node)
 
 
 @reporter.step("Check node {node} in network map")
@@ -250,6 +238,11 @@ def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[Stor
     assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map"
 
 
+@wait_for_success(300, 15, title="Await node {node} in network map")
+def await_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
+    check_node_in_map(node, shell, alive_node)
+
+
 @reporter.step("Check node {node} NOT in network map")
 def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
     alive_node = alive_node or node
@@ -276,12 +269,7 @@ def wait_for_node_to_be_ready(node: StorageNode) -> None:
 
 
 @reporter.step("Remove nodes from network map trough cli-adm morph command")
-def remove_nodes_from_map_morph(
-    shell: Shell,
-    cluster: Cluster,
-    remove_nodes: list[StorageNode],
-    alive_node: Optional[StorageNode] = None,
-):
+def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: list[StorageNode], alive_node: Optional[StorageNode] = None):
     """
     Move node to the Offline state in the candidates list and tick an epoch to update the netmap
     using frostfs-adm
@@ -300,9 +288,5 @@ def remove_nodes_from_map_morph(
 
     if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
         # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
-        frostfsadm = FrostfsAdm(
-            shell=remote_shell,
-            frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
-            config_file=FROSTFS_ADM_CONFIG_PATH,
-        )
+        frostfsadm = FrostfsAdm(shell=remote_shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH)
         frostfsadm.morph.remove_nodes(node_netmap_keys)
diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py
index 7f93e40..53098b1 100644
--- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py
+++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py
@@ -14,6 +14,7 @@ from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_E
 from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
 from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider
 from frostfs_testlib.steps.network import IpHelper
+from frostfs_testlib.steps.node_management import include_node_to_network_map, remove_nodes_from_map_morph
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode
 from frostfs_testlib.storage.controllers.disk_controller import DiskController
 from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
@@ -39,6 +40,7 @@ class ClusterStateController:
         self.stopped_nodes: list[ClusterNode] = []
         self.detached_disks: dict[str, DiskController] = {}
         self.dropped_traffic: list[ClusterNode] = []
+        self.excluded_from_netmap: list[StorageNode] = []
         self.stopped_services: set[NodeBase] = set()
         self.cluster = cluster
         self.healthcheck = healthcheck
@@ -307,23 +309,14 @@ class ClusterStateController:
         self.suspended_services = {}
 
     @reporter.step("Drop traffic to {node}, nodes - {block_nodes}")
-    def drop_traffic(
-        self,
-        node: ClusterNode,
-        wakeup_timeout: int,
-        name_interface: str,
-        block_nodes: list[ClusterNode] = None,
-    ) -> None:
+    def drop_traffic(self, node: ClusterNode, wakeup_timeout: int, name_interface: str, block_nodes: list[ClusterNode] = None) -> None:
         list_ip = self._parse_interfaces(block_nodes, name_interface)
         IpHelper.drop_input_traffic_to_node(node, list_ip)
         time.sleep(wakeup_timeout)
         self.dropped_traffic.append(node)
 
     @reporter.step("Start traffic to {node}")
-    def restore_traffic(
-        self,
-        node: ClusterNode,
-    ) -> None:
+    def restore_traffic(self, node: ClusterNode) -> None:
         IpHelper.restore_input_traffic_to_node(node=node)
         index = self.dropped_traffic.index(node)
         self.dropped_traffic.pop(index)
@@ -410,9 +403,7 @@ class ClusterStateController:
     @reporter.step("Set MaintenanceModeAllowed - {status}")
     def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None:
         frostfs_adm = FrostfsAdm(
-            shell=cluster_node.host.get_shell(),
-            frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
-            config_file=FROSTFS_ADM_CONFIG_PATH,
+            shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH
         )
         frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}")
 
@@ -453,6 +444,25 @@ class ClusterStateController:
         else:
             assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'"
 
+    def remove_node_from_netmap(self, removes_nodes: list[StorageNode]) -> None:
+        alive_storage = list(set(self.cluster.storage_nodes) - set(removes_nodes))[0]
+        remove_nodes_from_map_morph(self.shell, self.cluster, removes_nodes, alive_storage)
+        self.excluded_from_netmap.extend(removes_nodes)
+
+    def include_node_to_netmap(self, include_node: StorageNode, alive_node: StorageNode):
+        include_node_to_network_map(include_node, alive_node, self.shell, self.cluster)
+        self.excluded_from_netmap.pop(self.excluded_from_netmap.index(include_node))
+
+    def include_all_excluded_nodes(self):
+        if not self.excluded_from_netmap:
+            return
+        alive_node = list(set(self.cluster.storage_nodes) - set(self.excluded_from_netmap))[0]
+        if not alive_node:
+            return
+
+        for exclude_node in self.excluded_from_netmap.copy():
+            self.include_node_to_netmap(exclude_node, alive_node)
+
     def _get_cli(
         self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode
     ) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]:
@@ -469,11 +479,7 @@ class ClusterStateController:
 
         frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH)
         frostfs_cli = FrostfsCli(local_shell, FROSTFS_CLI_EXEC, local_wallet.config_path)
-        frostfs_cli_remote = FrostfsCli(
-            shell=shell,
-            frostfs_cli_exec_path=FROSTFS_CLI_EXEC,
-            config_file=wallet_config_path,
-        )
+        frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path)
         return frostfs_adm, frostfs_cli, frostfs_cli_remote
 
     def _enable_date_synchronizer(self, cluster_node: ClusterNode):
@@ -536,8 +542,5 @@ class ClusterStateController:
 
     @reporter.step("Get contract by domain - {domain_name}")
     def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str):
-        frostfs_adm = FrostfsAdm(
-            shell=cluster_node.host.get_shell(),
-            frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
-        )
+        frostfs_adm = FrostfsAdm(shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC)
         return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_http_endpoint(), domain_name).stdout
diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py
index d192de5..55a8388 100644
--- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py
+++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py
@@ -90,3 +90,6 @@ class Chunk:
 
     def __str__(self) -> str:
         return self.object_id
+
+    def __repr__(self) -> str:
+        return self.object_id
diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py
index d1bba9f..7f3161c 100644
--- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py
+++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py
@@ -8,6 +8,7 @@
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher
 from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo
 from frostfs_testlib.storage.grpc_operations import interfaces
+from frostfs_testlib.testing.test_control import wait_for_success
 from frostfs_testlib.utils.cli_utils import parse_netmap_output
 
 
@@ -42,6 +43,7 @@ class ChunksOperations(interfaces.ChunksInterface):
             if cluster_node.host_ip == node_info.node:
                 return (cluster_node, node_info)
 
+    @wait_for_success(300, 5, fail_testcase=None)
     @reporter.step("Search shard with chunk {chunk}")
     def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str:
         oid_path = f"{chunk.object_id[0]}/{chunk.object_id[1]}/{chunk.object_id[2]}/{chunk.object_id[3]}"
@@ -63,7 +65,7 @@ class ChunksOperations(interfaces.ChunksInterface):
         address: Optional[str] = None,
         bearer: Optional[str] = None,
         generate_key: Optional[bool] = None,
-        trace: bool = False,
+        trace: bool = True,
         root: bool = False,
         verify_presence_all: bool = False,
         json: bool = True,
@@ -86,7 +88,7 @@ class ChunksOperations(interfaces.ChunksInterface):
             xhdr=xhdr,
             timeout=timeout,
         )
-        return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0]
+        return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])
 
     @reporter.step("Get last parity chunk")
     def get_parity(
@@ -97,7 +99,7 @@ class ChunksOperations(interfaces.ChunksInterface):
         bearer: Optional[str] = None,
         generate_key: Optional[bool] = None,
         oid: Optional[str] = None,
-        trace: bool = False,
+        trace: bool = True,
         root: bool = False,
         verify_presence_all: bool = False,
         json: bool = True,
@@ -120,7 +122,7 @@ class ChunksOperations(interfaces.ChunksInterface):
             xhdr=xhdr,
             timeout=timeout,
         )
-        return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0]
+        return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[-1]
 
     @reporter.step("Get first data chunk")
     def get_first_data(
diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py
index c8360ea..7a637d7 100644
--- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py
+++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py
@@ -8,7 +8,7 @@
 from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
 from frostfs_testlib.plugins import load_plugin
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
 from frostfs_testlib.s3.interfaces import BucketContainerResolver
-from frostfs_testlib.storage.cluster import ClusterNode
+from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.grpc_operations import interfaces
 from frostfs_testlib.utils import
json_utils @@ -266,6 +266,7 @@ class ContainerOperations(interfaces.ContainerInterface): self, endpoint: str, cid: str, + cluster: Cluster, address: Optional[str] = None, ttl: Optional[int] = None, from_file: Optional[str] = None, diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py index 63a2922..0e14aec 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py @@ -509,6 +509,7 @@ class ObjectOperations(interfaces.ObjectInterface): cid: str, endpoint: str, bearer: str = "", + oid: Optional[str] = None, filters: Optional[dict] = None, expected_objects_list: Optional[list] = None, xhdr: Optional[dict] = None, @@ -516,6 +517,9 @@ class ObjectOperations(interfaces.ObjectInterface): phy: bool = False, root: bool = False, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + ttl: Optional[int] = None, ) -> list: """ SEARCH an Object. @@ -541,11 +545,15 @@ class ObjectOperations(interfaces.ObjectInterface): rpc_endpoint=endpoint, cid=cid, bearer=bearer, + oid=oid, xhdr=xhdr, filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None, session=session, phy=phy, root=root, + address=address, + generate_key=generate_key, + ttl=ttl, timeout=timeout, ) diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces.py b/src/frostfs_testlib/storage/grpc_operations/interfaces.py index 1947435..c293c2d 100644 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces.py +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces.py @@ -235,6 +235,7 @@ class ObjectInterface(ABC): cid: str, endpoint: str, bearer: str = "", + oid: Optional[str] = None, filters: Optional[dict] = None, expected_objects_list: Optional[list] = None, xhdr: Optional[dict] = None, @@ -242,6 +243,9 @@ class ObjectInterface(ABC): phy: bool = False, root: bool = False, timeout: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + ttl: Optional[int] = None, ) -> List: pass @@ -368,6 +372,7 @@ class ContainerInterface(ABC): self, endpoint: str, cid: str, + cluster: Cluster, address: Optional[str] = None, ttl: Optional[int] = None, from_file: Optional[str] = None, @@ -376,7 +381,7 @@ class ContainerInterface(ABC): xhdr: Optional[dict] = None, generate_key: Optional[bool] = None, timeout: Optional[str] = None, - ) -> List[str]: + ) -> List[ClusterNode]: """Show the nodes participating in the container in the current epoch.""" raise NotImplementedError("No implemethed method nodes") From cef64e315ee5e872f1f1ebc9eaefcd4b5bfefc9c Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Wed, 11 Sep 2024 19:39:25 +0300 Subject: [PATCH 203/274] [#267] add no rule found object and morph chain --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 126 ++++++++++++++++++ .../resources/error_patterns.py | 1 + 2 files changed, 127 insertions(+) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index d8fd61c..5b808ca 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -350,3 +350,129 @@ class FrostfsAdmMorph(CliCommand): if param not in ["self", "node_netmap_keys"] }, ) + + def add_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + rule: 
Optional[list[str]] = None, + path: Optional[str] = None, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Add APE rule chain for the specified target + + Args: + address: Address of wallet account + chain-id: Assign ID to the parsed chain + chain-id-hex: Flag to parse chain ID as hex + endpoint: Remote node control address (as 'multiaddr' or ':') + path: Path to encoded chain in JSON or binary format + rule: Rule statement + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "control add-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def get_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Get APE rule chain for the specified target + + Args: + address string Address of wallet account + chain-id string Chain id + chain-id-hex Flag to parse chain ID as hex + endpoint string Remote node control address (as 'multiaddr' or ':') + target-name string Resource name in APE resource name format + target-type string Resource type(container/namespace) + timeout duration Timeout for an operation (default 15s) + wallet string Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "control get-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list_rules( + self, + target_type: str, + target_name: Optional[str] = None, + rpc_endpoint: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """List APE rule chains for the specified target + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "morph ape list-rule-chains", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def remove_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + all: Optional[bool] = None, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Remove APE rule chain for the specified target + + Args: + address: Address of wallet account + all: Remove all chains + chain-id: Assign ID to the parsed chain + chain-id-hex: Flag to parse chain ID as hex + endpoint: Remote node control address (as 'multiaddr' or ':') + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result.
+ """ + return self._execute( + "control remove-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) \ No newline at end of file diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 3b9231e..3ba5f13 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -29,3 +29,4 @@ S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied" RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied" NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound" +NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound" From 24b8ca73d74fbf7a52c733e72dc1e4127f55ceac Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Wed, 11 Sep 2024 22:00:21 +0300 Subject: [PATCH 204/274] [#291] get namespace endpoint --- src/frostfs_testlib/storage/dataclasses/frostfs_services.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 1420356..4f5c348 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -39,6 +39,9 @@ class S3Gate(NodeBase): def get_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0) + def get_ns_endpoint(self, ns_name: str) -> str: + return self._get_attribute(f"{ConfigAttributes.ENDPOINT_DATA_0}_namespace").format(namespace=ns_name) + def get_all_endpoints(self) -> list[str]: return [ self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0), From 2976e30b75d25ad00d62529e0a68beda490ce795 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Mon, 7 Oct 2024 15:59:00 +0300 Subject: [PATCH 205/274] [#299] Add fuse to prevent similar names generation Signed-off-by: a.berezin --- src/frostfs_testlib/utils/string_utils.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/utils/string_utils.py b/src/frostfs_testlib/utils/string_utils.py index 80efa65..726c792 100644 --- a/src/frostfs_testlib/utils/string_utils.py +++ b/src/frostfs_testlib/utils/string_utils.py @@ -1,3 +1,4 @@ +import itertools import random import re import string @@ -7,6 +8,8 @@ ONLY_ASCII_LETTERS = string.ascii_letters DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits NON_DIGITS_AND_LETTERS = string.punctuation +FUSE = itertools.cycle(range(5)) + def unique_name(prefix: str = "", postfix: str = ""): """ @@ -18,7 +21,7 @@ def unique_name(prefix: str = "", postfix: str = ""): Returns: unique name string """ - return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}{postfix}" + return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}{next(FUSE)}{postfix}" def random_string(length: int = 5, source: str = ONLY_ASCII_LETTERS): From a04eba8aecdbbc9285141c82328291eb0bf0e9b9 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Fri, 11 Oct 2024 12:23:32 +0300 Subject: [PATCH 206/274] [#302] Autoadd marks for frostfs Signed-off-by: a.berezin --- src/frostfs_testlib/__init__.py | 1 + src/frostfs_testlib/hooks.py | 12 ++++++++++++ src/frostfs_testlib/utils/string_utils.py | 1 + 
3 files changed, 14 insertions(+) create mode 100644 src/frostfs_testlib/hooks.py diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index f3143e6..1ceb972 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1,3 +1,4 @@ __version__ = "2.0.1" from .fixtures import configure_testlib, hosting, temp_directory +from .hooks import pytest_collection_modifyitems diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py new file mode 100644 index 0000000..df89bff --- /dev/null +++ b/src/frostfs_testlib/hooks.py @@ -0,0 +1,12 @@ +import pytest + + +@pytest.hookimpl +def pytest_collection_modifyitems(items: list[pytest.Item]): + # All tests which reside in a frostfs nodeid are granted the frostfs marker, excluding + # nodeid = full path of the test + # 1. plugins + # 2. testlib itself + for item in items: + if "frostfs" in item.nodeid and "plugin" not in item.nodeid and "testlib" not in item.nodeid: + item.add_marker("frostfs") diff --git a/src/frostfs_testlib/utils/string_utils.py b/src/frostfs_testlib/utils/string_utils.py index 726c792..acbca92 100644 --- a/src/frostfs_testlib/utils/string_utils.py +++ b/src/frostfs_testlib/utils/string_utils.py @@ -8,6 +8,7 @@ ONLY_ASCII_LETTERS = string.ascii_letters DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits NON_DIGITS_AND_LETTERS = string.punctuation +# if unique_name is called multiple times within the same microsecond, append 0-4 to the name so it is surely unique FUSE = itertools.cycle(range(5)) From 2a41f2b0f64316efd83889b88b19ad7d966cb948 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Mon, 23 Sep 2024 17:54:40 +0300 Subject: [PATCH 207/274] [#301] Added interfaces for put/get lifecycle configuration to s3 clients --- pyproject.toml | 4 +-- requirements.txt | 4 +-- src/frostfs_testlib/cli/frostfs_adm/morph.py | 12 ++------ src/frostfs_testlib/s3/aws_cli_client.py | 30 +++++++++++++++++++ src/frostfs_testlib/s3/boto3_client.py | 21 +++++++++++++ src/frostfs_testlib/s3/interfaces.py | 12 ++++++++ src/frostfs_testlib/steps/epoch.py | 11 +++++-- .../testing/cluster_test_base.py | 8 ++--- 8 files changed, 80 insertions(+), 22 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 296ce65..3faa637 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -27,8 +27,8 @@ dependencies = [ "testrail-api>=1.12.0", "pytest==7.1.2", "tenacity==8.0.1", - "boto3==1.16.33", - "boto3-stubs[essential]==1.16.33", + "boto3==1.35.30", + "boto3-stubs[essential]==1.35.30", ] requires-python = ">=3.10" diff --git a/requirements.txt b/requirements.txt index 32e604f..e012366 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,8 +8,8 @@ docstring_parser==0.15 testrail-api==1.12.0 tenacity==8.0.1 pytest==7.1.2 -boto3==1.16.33 -boto3-stubs[essential]==1.16.33 +boto3==1.35.30 +boto3-stubs[essential]==1.35.30 # Dev dependencies black==22.8.0 diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index 5b808ca..eea0985 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -69,9 +69,7 @@ class FrostfsAdmMorph(CliCommand): **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) - def set_config( - self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None - ) -> CommandResult: + def set_config(self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] =
None) -> CommandResult: """Add/update global config value in the FrostFS network. Args: @@ -125,7 +123,7 @@ class FrostfsAdmMorph(CliCommand): ) def force_new_epoch( - self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None + self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None ) -> CommandResult: """Create new FrostFS epoch event in the side chain. @@ -344,11 +342,7 @@ class FrostfsAdmMorph(CliCommand): return self._execute( f"morph remove-nodes {' '.join(node_netmap_keys)}", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self", "node_netmap_keys"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, ) def add_rule( diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 8169afe..2482376 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -754,6 +754,36 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("ObjectLockConfiguration") + @reporter.step("Put bucket lifecycle configuration") + def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + cmd = ( + f"aws {self.common_flags} s3api put-bucket-lifecycle-configuration --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint} --lifecycle-configuration file://{dumped_configuration} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + + @reporter.step("Get bucket lifecycle configuration") + def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + cmd = ( + f"aws {self.common_flags} s3api get-bucket-lifecycle-configuration --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + + @reporter.step("Delete bucket lifecycle configuration") + def delete_bucket_lifecycle(self, bucket: str) -> dict: + cmd = ( + f"aws {self.common_flags} s3api delete-bucket-lifecycle --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + @staticmethod def _to_json(output: str) -> dict: json_output = {} diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index a644a6f..b638939 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -296,6 +296,27 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.delete_bucket_cors(Bucket=bucket) log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_cors result", response, {"Bucket": bucket}) + @reporter.step("Put bucket lifecycle configuration") + @report_error + def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + response = self.boto3_client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle_configuration) + log_command_execution(self.s3gate_endpoint, "S3 put_bucket_lifecycle_configuration result", response, {"Bucket": bucket}) + return response + + @reporter.step("Get bucket lifecycle configuration") + @report_error + def 
get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + response = self.boto3_client.get_bucket_lifecycle_configuration(Bucket=bucket) + log_command_execution(self.s3gate_endpoint, "S3 get_bucket_lifecycle_configuration result", response, {"Bucket": bucket}) + return {"Rules": response.get("Rules")} + + @reporter.step("Delete bucket lifecycle configuration") + @report_error + def delete_bucket_lifecycle(self, bucket: str) -> dict: + response = self.boto3_client.delete_bucket_lifecycle(Bucket=bucket) + log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_lifecycle result", response, {"Bucket": bucket}) + return response + # END OF BUCKET METHODS # # OBJECT METHODS # diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index b1825d5..da4fc6b 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -366,6 +366,18 @@ class S3ClientWrapper(HumanReadableABC): def delete_object_tagging(self, bucket: str, key: str) -> None: """Removes the entire tag set from the specified object.""" + @abstractmethod + def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + """Adds or updates bucket lifecycle configuration""" + + @abstractmethod + def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + """Gets bucket lifecycle configuration""" + + @abstractmethod + def delete_bucket_lifecycle(self, bucket: str) -> dict: + """Deletes bucket lifecycle""" + @abstractmethod def get_object_attributes( self, diff --git a/src/frostfs_testlib/steps/epoch.py b/src/frostfs_testlib/steps/epoch.py index ce7ed12..6ec5483 100644 --- a/src/frostfs_testlib/steps/epoch.py +++ b/src/frostfs_testlib/steps/epoch.py @@ -69,7 +69,7 @@ def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] @reporter.step("Tick Epoch") -def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): +def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None, delta: Optional[int] = None): """ Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv) Args: @@ -88,12 +88,17 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH, ) - frostfs_adm.morph.force_new_epoch() + frostfs_adm.morph.force_new_epoch(delta=delta) return # Otherwise we tick epoch using transaction cur_epoch = get_epoch(shell, cluster) + if delta: + next_epoch = cur_epoch + delta + else: + next_epoch = cur_epoch + 1 + # Use first node by default ir_node = cluster.services(InnerRing)[0] # In case if no local_wallet_path is provided, we use wallet_path @@ -110,7 +115,7 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] wallet_password=ir_wallet_pass, scripthash=get_contract_hash(morph_chain, "netmap.frostfs", shell=shell), method="newEpoch", - arguments=f"int:{cur_epoch + 1}", + arguments=f"int:{next_epoch}", multisig_hash=f"{ir_address}:Global", address=ir_address, rpc_endpoint=morph_endpoint, diff --git a/src/frostfs_testlib/testing/cluster_test_base.py b/src/frostfs_testlib/testing/cluster_test_base.py index f2e10ad..50c8eb6 100644 --- a/src/frostfs_testlib/testing/cluster_test_base.py +++ b/src/frostfs_testlib/testing/cluster_test_base.py @@ -25,12 +25,8 @@ class ClusterTestBase: for _ in range(epochs_to_tick): self.tick_epoch(alive_node, wait_block) - def tick_epoch( - self, - 
alive_node: Optional[StorageNode] = None, - wait_block: int = None, - ): - epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node) + def tick_epoch(self, alive_node: Optional[StorageNode] = None, wait_block: int = None, delta: Optional[int] = None): + epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node, delta=delta) if wait_block: self.wait_for_blocks(wait_block) From cf48f474ebb8aea4798e007c931ca157eb8fd7ea Mon Sep 17 00:00:00 2001 From: "m.malygina" Date: Thu, 10 Oct 2024 10:39:54 +0300 Subject: [PATCH 208/274] [#303] add check if registry is on hdd Signed-off-by: m.malygina --- src/frostfs_testlib/load/interfaces/scenario_runner.py | 5 +++++ src/frostfs_testlib/load/runners.py | 6 ++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/interfaces/scenario_runner.py b/src/frostfs_testlib/load/interfaces/scenario_runner.py index 45c1317..c0062a9 100644 --- a/src/frostfs_testlib/load/interfaces/scenario_runner.py +++ b/src/frostfs_testlib/load/interfaces/scenario_runner.py @@ -1,5 +1,6 @@ from abc import ABC, abstractmethod +from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.load.k6 import K6 from frostfs_testlib.load.load_config import LoadParams from frostfs_testlib.storage.cluster import ClusterNode @@ -48,3 +49,7 @@ class ScenarioRunner(ABC): @abstractmethod def get_results(self) -> dict: """Get results from K6 run""" + + @abstractmethod + def get_loaders(self) -> list[Loader]: + """Return loaders""" diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index a34786f..1ceac09 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -30,6 +30,7 @@ from frostfs_testlib.utils.file_keeper import FileKeeper class RunnerBase(ScenarioRunner): k6_instances: list[K6] + loaders: list[Loader] @reporter.step("Run preset on loaders") def preset(self): @@ -49,9 +50,11 @@ class RunnerBase(ScenarioRunner): def get_k6_instances(self): return self.k6_instances + def get_loaders(self) -> list[Loader]: + return self.loaders + class DefaultRunner(RunnerBase): - loaders: list[Loader] user: User def __init__( @@ -228,7 +231,6 @@ class DefaultRunner(RunnerBase): class LocalRunner(RunnerBase): - loaders: list[Loader] cluster_state_controller: ClusterStateController file_keeper: FileKeeper user: User From 738cfacbb7416d792c95e034bb8355acd7b1c7dd Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Mon, 7 Oct 2024 17:33:45 +0300 Subject: [PATCH 209/274] [#300] Refactor tests: use `unique_name` instead of `hex + timestamp` Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/credentials/authmate_s3_provider.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/credentials/authmate_s3_provider.py b/src/frostfs_testlib/credentials/authmate_s3_provider.py index 66c5015..ed6454b 100644 --- a/src/frostfs_testlib/credentials/authmate_s3_provider.py +++ b/src/frostfs_testlib/credentials/authmate_s3_provider.py @@ -1,5 +1,4 @@ import re -from datetime import datetime from typing import Optional from frostfs_testlib import reporter @@ -10,6 +9,7 @@ from frostfs_testlib.shell import LocalShell from frostfs_testlib.steps.cli.container import list_containers from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate +from frostfs_testlib.utils import string_utils class AuthmateS3CredentialsProvider(S3CredentialsProvider): @@ -22,7 +22,7 @@ class
AuthmateS3CredentialsProvider(S3CredentialsProvider): gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes] # unique short bucket name - bucket = f"bucket-{hex(int(datetime.now().timestamp()*1000000))}" + bucket = string_utils.unique_name("bucket-") frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC) issue_secret_output = frostfs_authmate.secret.issue( From 5fa58a55c05f006b81954bc571e7a9e1cca1ffed Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 18 Oct 2024 13:25:12 +0300 Subject: [PATCH 210/274] [#304] Improve logging Boto3 IAM methods Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/s3/boto3_client.py | 174 +++++++++++++++++++------ 1 file changed, 135 insertions(+), 39 deletions(-) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index b638939..a99b866 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -68,6 +68,7 @@ class Boto3ClientWrapper(S3ClientWrapper): self.access_key_id: str = access_key_id self.secret_access_key: str = secret_access_key self.s3gate_endpoint: str = "" + self.iam_endpoint: str = "" self.boto3_iam_client: S3Client = None self.set_endpoint(s3gate_endpoint) @@ -90,11 +91,16 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Set endpoint IAM to {iam_endpoint}") def set_iam_endpoint(self, iam_endpoint: str): + if self.iam_endpoint == iam_endpoint: + return + + self.iam_endpoint = iam_endpoint + self.boto3_iam_client = self.session.client( service_name="iam", aws_access_key_id=self.access_key_id, aws_secret_access_key=self.secret_access_key, - endpoint_url=iam_endpoint, + endpoint_url=self.iam_endpoint, verify=False, ) @@ -687,25 +693,36 @@ class Boto3ClientWrapper(S3ClientWrapper): # Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.) 
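# Editor's note (illustrative sketch, not part of this patch): every IAM
# wrapper below follows the same logging shape introduced here — collect the
# keyword arguments, call the boto3 client, then record endpoint, operation
# name, response and params via log_command_execution. `iam_example_call`
# and `example_call` are placeholder names:
#
#     @reporter.step("Example IAM call")
#     @report_error
#     def iam_example_call(self, user_name: str) -> dict:
#         params = self._convert_to_s3_params(locals().items())
#         response = self.boto3_iam_client.example_call(**params)
#         log_command_execution(self.iam_endpoint, "IAM Example Call", response, params)
#         return response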
@reporter.step("Adds the specified user to the specified group") + @report_error def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: - response = self.boto3_iam_client.add_user_to_group(UserName=user_name, GroupName=group_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.add_user_to_group(**params) + log_command_execution(self.iam_endpoint, "IAM Add User to Group", response, params) return response @reporter.step("Attaches the specified managed policy to the specified IAM group") + @report_error def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: - response = self.boto3_iam_client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.attach_group_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Attach Group Policy", response, params) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Attaches the specified managed policy to the specified user") + @report_error def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: - response = self.boto3_iam_client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.attach_user_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Attach User Policy", response, params) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") + @report_error def iam_create_access_key(self, user_name: str) -> dict: response = self.boto3_iam_client.create_access_key(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM Create Access Key", response, {"UserName": user_name}) access_key_id = response["AccessKey"].get("AccessKeyId") secret_access_key = response["AccessKey"].get("SecretAccessKey") @@ -715,138 +732,190 @@ class Boto3ClientWrapper(S3ClientWrapper): return access_key_id, secret_access_key @reporter.step("Creates a new group") + @report_error def iam_create_group(self, group_name: str) -> dict: response = self.boto3_iam_client.create_group(GroupName=group_name) + log_command_execution(self.iam_endpoint, "IAM Create Group", response, {"GroupName": group_name}) + assert response.get("Group"), f"Expected Group in response:\n{response}" assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" return response @reporter.step("Creates a new managed policy for your AWS account") + @report_error def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: - response = self.boto3_iam_client.create_policy(PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)) + params = self._convert_to_s3_params(locals().items()) + params["PolicyDocument"] = json.dumps(policy_document) + response = self.boto3_iam_client.create_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Create Policy", response, params) + assert response.get("Policy"), f"Expected Policy in response:\n{response}" assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" return response @reporter.step("Creates a new IAM user for your AWS account") + @report_error def iam_create_user(self, user_name: str) -> dict: response = self.boto3_iam_client.create_user(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM Create User", 
response, {"UserName": user_name}) + assert response.get("User"), f"Expected User in response:\n{response}" assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" return response @reporter.step("Deletes the access key pair associated with the specified IAM user") + @report_error def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: - response = self.boto3_iam_client.delete_access_key(AccessKeyId=access_key_id, UserName=user_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.delete_access_key(**params) + log_command_execution(self.iam_endpoint, "IAM Delete Access Key", response, params) return response @reporter.step("Deletes the specified IAM group") + @report_error def iam_delete_group(self, group_name: str) -> dict: response = self.boto3_iam_client.delete_group(GroupName=group_name) + log_command_execution(self.iam_endpoint, "IAM Delete Group", response, {"GroupName": group_name}) return response @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") + @report_error def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: - response = self.boto3_iam_client.delete_group_policy(GroupName=group_name, PolicyName=policy_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.delete_group_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Delete Group Policy", response, params) return response @reporter.step("Deletes the specified managed policy") + @report_error def iam_delete_policy(self, policy_arn: str) -> dict: response = self.boto3_iam_client.delete_policy(PolicyArn=policy_arn) + log_command_execution(self.iam_endpoint, "IAM Delete Policy", response, {"PolicyArn": policy_arn}) return response @reporter.step("Deletes the specified IAM user") + @report_error def iam_delete_user(self, user_name: str) -> dict: response = self.boto3_iam_client.delete_user(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM Delete User", response, {"UserName": user_name}) return response @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") + @report_error def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: - response = self.boto3_iam_client.delete_user_policy(UserName=user_name, PolicyName=policy_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.delete_user_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Delete User Policy", response, params) return response @reporter.step("Removes the specified managed policy from the specified IAM group") + @report_error def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: - response = self.boto3_iam_client.detach_group_policy(GroupName=group_name, PolicyArn=policy_arn) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.detach_group_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Detach Group Policy", response, params) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Removes the specified managed policy from the specified user") + @report_error def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: - response = self.boto3_iam_client.detach_user_policy(UserName=user_name, PolicyArn=policy_arn) + params = self._convert_to_s3_params(locals().items()) + response = 
self.boto3_iam_client.detach_user_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Detach User Policy", response, params) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Returns a list of IAM users that are in the specified IAM group") + @report_error def iam_get_group(self, group_name: str) -> dict: response = self.boto3_iam_client.get_group(GroupName=group_name) + log_command_execution(self.iam_endpoint, "IAM Get Group", response, {"GroupName": group_name}) assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}" - return response @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") + @report_error def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: - response = self.boto3_iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name) - + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.get_group_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Get Group Policy", response, params) return response @reporter.step("Retrieves information about the specified managed policy") + @report_error def iam_get_policy(self, policy_arn: str) -> dict: response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn) + log_command_execution(self.iam_endpoint, "IAM Get Policy", response, {"PolicyArn": policy_arn}) + assert response.get("Policy"), f"Expected Policy in response:\n{response}" assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" return response @reporter.step("Retrieves information about the specified version of the specified managed policy") + @report_error def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: - response = self.boto3_iam_client.get_policy_version(PolicyArn=policy_arn, VersionId=version_id) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.get_policy_version(**params) + log_command_execution(self.iam_endpoint, "IAM Get Policy Version", response, params) + assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}" assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}" return response @reporter.step("Retrieves information about the specified IAM user") + @report_error def iam_get_user(self, user_name: str) -> dict: response = self.boto3_iam_client.get_user(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM Get User", response, {"UserName": user_name}) + assert response.get("User"), f"Expected User in response:\n{response}" assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" return response @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") + @report_error def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: - response = self.boto3_iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.get_user_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Get User Policy", response, params) assert response.get("UserName"), f"Expected UserName in response:\n{response}" - return response @reporter.step("Returns information about the access key IDs associated with the specified IAM user") + @report_error def 
iam_list_access_keys(self, user_name: str) -> dict: response = self.boto3_iam_client.list_access_keys(UserName=user_name) - + log_command_execution(self.iam_endpoint, "IAM List Access Keys", response, {"UserName": user_name}) return response @reporter.step("Lists all managed policies that are attached to the specified IAM group") + @report_error def iam_list_attached_group_policies(self, group_name: str) -> dict: response = self.boto3_iam_client.list_attached_group_policies(GroupName=group_name) + log_command_execution(self.iam_endpoint, "IAM List Attached Group Policies", response, {"GroupName": group_name}) assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" - return response @reporter.step("Lists all managed policies that are attached to the specified IAM user") + @report_error def iam_list_attached_user_policies(self, user_name: str) -> dict: response = self.boto3_iam_client.list_attached_user_policies(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM List Attached User Policies", response, {"UserName": user_name}) assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" - return response @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") + @report_error def iam_list_entities_for_policy(self, policy_arn: str) -> dict: response = self.boto3_iam_client.list_entities_for_policy(PolicyArn=policy_arn) + log_command_execution(self.iam_endpoint, "IAM List Entities For Policy", response, {"PolicyArn": policy_arn}) assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" @@ -854,98 +923,125 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") + @report_error def iam_list_group_policies(self, group_name: str) -> dict: response = self.boto3_iam_client.list_group_policies(GroupName=group_name) + log_command_execution(self.iam_endpoint, "IAM List Group Policies", response, {"GroupName": group_name}) assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" - return response @reporter.step("Lists the IAM groups") + @report_error def iam_list_groups(self) -> dict: response = self.boto3_iam_client.list_groups() + log_command_execution(self.iam_endpoint, "IAM List Groups", response) assert response.get("Groups"), f"Expected Groups in response:\n{response}" - return response @reporter.step("Lists the IAM groups that the specified IAM user belongs to") + @report_error def iam_list_groups_for_user(self, user_name: str) -> dict: response = self.boto3_iam_client.list_groups_for_user(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM List Groups For User", response, {"UserName": user_name}) assert response.get("Groups"), f"Expected Groups in response:\n{response}" - return response @reporter.step("Lists all the managed policies that are available in your AWS account") + @report_error def iam_list_policies(self) -> dict: response = self.boto3_iam_client.list_policies() + log_command_execution(self.iam_endpoint, "IAM List Policies", response) assert response.get("Policies"), f"Expected Policies in response:\n{response}" - return response @reporter.step("Lists information about the versions of the specified managed policy") + @report_error def iam_list_policy_versions(self, policy_arn: str) -> 
dict: response = self.boto3_iam_client.list_policy_versions(PolicyArn=policy_arn) + log_command_execution(self.iam_endpoint, "IAM List Policy Versions", response, {"PolicyArn": policy_arn}) assert response.get("Versions"), f"Expected Versions in response:\n{response}" - return response @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") + @report_error def iam_list_user_policies(self, user_name: str) -> dict: response = self.boto3_iam_client.list_user_policies(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM List User Policies", response, {"UserName": user_name}) assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" - return response @reporter.step("Lists the IAM users") + @report_error def iam_list_users(self) -> dict: response = self.boto3_iam_client.list_users() + log_command_execution(self.iam_endpoint, "IAM List Users", response) assert response.get("Users"), f"Expected Users in response:\n{response}" - return response @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") + @report_error def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: - response = self.boto3_iam_client.put_group_policy( - GroupName=group_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document) - ) + params = self._convert_to_s3_params(locals().items()) + params["PolicyDocument"] = json.dumps(policy_document) + response = self.boto3_iam_client.put_group_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Put Group Policy", response, params) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") + @report_error def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: - response = self.boto3_iam_client.put_user_policy( - UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document) - ) + params = self._convert_to_s3_params(locals().items()) + params["PolicyDocument"] = json.dumps(policy_document) + response = self.boto3_iam_client.put_user_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Put User Policy", response, params) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Removes the specified user from the specified group") + @report_error def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: - response = self.boto3_iam_client.remove_user_from_group(GroupName=group_name, UserName=user_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.remove_user_from_group(**params) + log_command_execution(self.iam_endpoint, "IAM Remove User From Group", response, params) return response @reporter.step("Updates the name and/or the path of the specified IAM group") + @report_error def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict: - response = self.boto3_iam_client.update_group(GroupName=group_name, NewGroupName=new_name, NewPath="/") - + params = {"GroupName": group_name, "NewGroupName": new_name, "NewPath": "/"} + response = self.boto3_iam_client.update_group(**params) + log_command_execution(self.iam_endpoint, "IAM Update Group", response, params) return response @reporter.step("Updates the name and/or the path of the specified IAM user") + @report_error def iam_update_user(self, user_name: str, new_name: str, 
new_path: Optional[str] = None) -> dict: - response = self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath="/") + params = {"UserName": user_name, "NewUserName": new_name, "NewPath": "/"} + response = self.boto3_iam_client.update_user(**params) + log_command_execution(self.iam_endpoint, "IAM Update User", response, params) return response @reporter.step("Adds one or more tags to an IAM user") + @report_error def iam_tag_user(self, user_name: str, tags: list) -> dict: - tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - response = self.boto3_iam_client.tag_user(UserName=user_name, Tags=tags_json) + params = self._convert_to_s3_params(locals().items()) + params["Tags"] = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + response = self.boto3_iam_client.tag_user(**params) + log_command_execution(self.iam_endpoint, "IAM Tag User", response, params) return response @reporter.step("List tags of IAM user") + @report_error def iam_list_user_tags(self, user_name: str) -> dict: response = self.boto3_iam_client.list_user_tags(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM List User Tags", response, {"UserName": user_name}) return response @reporter.step("Removes the specified tags from the user") + @report_error def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: - response = self.boto3_iam_client.untag_user(UserName=user_name, TagKeys=tag_keys) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.untag_user(**params) + log_command_execution(self.iam_endpoint, "IAM Untag User", response, params) return response From 3f3be83d90cb3226268f00746e67f433b63c90be Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Mon, 21 Oct 2024 09:01:37 +0300 Subject: [PATCH 211/274] [#305] Added IAM abstract method --- src/frostfs_testlib/s3/interfaces.py | 4 ++++ src/frostfs_testlib/steps/metrics.py | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index da4fc6b..c084484 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -58,6 +58,10 @@ class S3ClientWrapper(HumanReadableABC): def set_endpoint(self, s3gate_endpoint: str): """Set endpoint""" + @abstractmethod + def set_iam_endpoint(self, iam_endpoint: str): + """Set iam endpoint""" + @abstractmethod def create_bucket( self, diff --git a/src/frostfs_testlib/steps/metrics.py b/src/frostfs_testlib/steps/metrics.py index 29e49d4..a9e545a 100644 --- a/src/frostfs_testlib/steps/metrics.py +++ b/src/frostfs_testlib/steps/metrics.py @@ -1,8 +1,8 @@ import re from frostfs_testlib import reporter -from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.testing.test_control import wait_for_success @reporter.step("Check metrics result") @@ -19,7 +19,7 @@ def check_metrics_counter( counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps) assert eval( f"{counter_act} {operator} {counter_exp}" - ), f"Expected: {counter_exp} {operator} Actual: {counter_act} in node: {cluster_node}" + ), f"Expected: {counter_exp} {operator} Actual: {counter_act} in nodes: {cluster_nodes}" @reporter.step("Get metrics value from node: {node}") From b2bf6677f184fdb2d92045d753722fd651091e46 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Fri, 25 Oct 2024 18:52:43 +0300 Subject: [PATCH 212/274] 
[#310] Update test marking Signed-off-by: a.berezin --- src/frostfs_testlib/hooks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py index df89bff..6830e78 100644 --- a/src/frostfs_testlib/hooks.py +++ b/src/frostfs_testlib/hooks.py @@ -8,5 +8,6 @@ def pytest_collection_modifyitems(items: list[pytest.Item]): # 1. plugins # 2. testlib itself for item in items: - if "frostfs" in item.nodeid and "plugin" not in item.nodeid and "testlib" not in item.nodeid: + location = item.location[0] + if "frostfs" in location and "plugin" not in location and "testlib" not in location: item.add_marker("frostfs") From e6faddedeb008950583174659eb52374bd475e5d Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Mon, 21 Oct 2024 23:47:47 +0300 Subject: [PATCH 213/274] [#297] add morph rule chain --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 28 ++++++------ .../storage/dataclasses/ape.py | 15 ++++++++++ 2 files changed, 23 insertions(+), 20 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index eea0985..7228692 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -122,9 +122,7 @@ class FrostfsAdmMorph(CliCommand): **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) - def force_new_epoch( - self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None - ) -> CommandResult: + def force_new_epoch(self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult: """Create new FrostFS epoch event in the side chain. Args: @@ -343,11 +341,11 @@ class FrostfsAdmMorph(CliCommand): return self._execute( f"morph remove-nodes {' '.join(node_netmap_keys)}", **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, ) - + def add_rule( self, - endpoint: str, chain_id: str, target_name: str, target_type: str, @@ -361,10 +359,8 @@ class FrostfsAdmMorph(CliCommand): """Add APE rule chain for the specified target Args: - address: Address of wallet account chain-id: Assign ID to the parsed chain chain-id-hex: Flag to parse chain ID as hex - endpoint: Remote node control address (as 'multiaddr' or ':') path: Path to encoded chain in JSON or binary format rule: Rule statement target-name: Resource name in APE resource name format target-type: Resource type(container/namespace) timeout: Timeout for an operation (default 15s) wallet: Path to the wallet or binary key Returns: Command`s result. """ return self._execute( - "control add-rule", + "morph ape add-rule-chain", **{param: value for param, value in locals().items() if param not in ["self"]}, ) def get_rule( self, - endpoint: str, chain_id: str, target_name: str, target_type: str, @@ -394,10 +389,8 @@ class FrostfsAdmMorph(CliCommand): """Get APE rule chain for the specified target Args: - address string Address of wallet account chain-id string Chain id chain-id-hex Flag to parse chain ID as hex - endpoint string Remote node control address (as 'multiaddr' or ':') target-name string Resource name in APE resource name format target-type string Resource type(container/namespace) timeout duration Timeout for an operation (default 15s) wallet string Path to the wallet or binary key Returns: Command`s result.
""" return self._execute( - "control get-rule", + "morph ape get-rule-chain", **{param: value for param, value in locals().items() if param not in ["self"]}, ) @@ -423,8 +416,6 @@ class FrostfsAdmMorph(CliCommand): """Drop objects from the node's local storage Args: - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') target-name: Resource name in APE resource name format target-type: Resource type(container/namespace) timeout: Timeout for an operation (default 15s) @@ -437,10 +428,9 @@ class FrostfsAdmMorph(CliCommand): "morph ape list-rule-chains", **{param: value for param, value in locals().items() if param not in ["self"]}, ) - + def remove_rule( self, - endpoint: str, chain_id: str, target_name: str, target_type: str, @@ -453,11 +443,9 @@ class FrostfsAdmMorph(CliCommand): """Drop objects from the node's local storage Args: - address: Address of wallet account all: Remove all chains chain-id: Assign ID to the parsed chain chain-id-hex: Flag to parse chain ID as hex - endpoint: Remote node control address (as 'multiaddr' or ':') target-name: Resource name in APE resource name format target-type: Resource type(container/namespace) timeout: Timeout for an operation (default 15s) @@ -467,6 +455,6 @@ class FrostfsAdmMorph(CliCommand): Command`s result. """ return self._execute( - "control remove-rule", + "morph ape rm-rule-chain", **{param: value for param, value in locals().items() if param not in ["self"]}, - ) \ No newline at end of file + ) diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py index b6563f4..f0f1758 100644 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -26,6 +26,21 @@ class ObjectOperations(HumanReadableEnum): return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] +@dataclass +class Operations: + GET_CONTAINER = "GetContainer" + PUT_CONTAINER = "PutContainer" + DELETE_CONTAINER = "DeleteContainer" + LIST_CONTAINER = "ListContainers" + GET_OBJECT = "GetObject" + DELETE_OBJECT = "DeleteObject" + HASH_OBJECT = "HashObject" + RANGE_OBJECT = "RangeObject" + SEARCH_OBJECT = "SearchObject" + HEAD_OBJECT = "HeadObject" + PUT_OBJECT = "PutObject" + + class Verb(HumanReadableEnum): ALLOW = "allow" DENY = "deny" From 3d6a356e20b5ce13350b1507d7d45e74749b37d7 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 18 Oct 2024 15:57:40 +0300 Subject: [PATCH 214/274] [#306] Fix handling of bucket names in AWS CLI - Add quotes around container names if they contain spaces or `-`. 
Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/s3/aws_cli_client.py | 154 +++++++++++++++++++++++ 1 file changed, 154 insertions(+) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 2482376..ff4e329 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -70,6 +70,9 @@ class AwsCliClient(S3ClientWrapper): if bucket is None: bucket = string_utils.unique_name("bucket-") + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + if object_lock_enabled_for_bucket is None: object_lock = "" elif object_lock_enabled_for_bucket: @@ -103,16 +106,25 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete bucket S3") def delete_bucket(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd, command_options) @reporter.step("Head bucket S3") def head_bucket(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd) @reporter.step("Put bucket versioning status") def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} " f"--versioning-configuration Status={status.value} " @@ -122,6 +134,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket versioning status") def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -132,6 +147,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put bucket tagging") def put_bucket_tagging(self, bucket: str, tags: list) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]} cmd = ( f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} " @@ -141,6 +159,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket tagging") def get_bucket_tagging(self, bucket: str) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -151,6 +172,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket acl") def get_bucket_acl(self, bucket: str) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) @@ -160,6 +184,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket location") def get_bucket_location(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile 
{self.profile}" @@ -170,6 +197,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List objects S3") def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -181,6 +211,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List objects S3 v2") def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -195,6 +228,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List objects versions S3") def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -205,6 +241,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List objects delete markers S3") def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -228,8 +267,13 @@ class AwsCliClient(S3ClientWrapper): ) -> str: if bucket is None: bucket = source_bucket + + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + if key is None: key = string_utils.unique_name("copy-object-") + copy_source = f"{source_bucket}/{source_key}" cmd = ( @@ -266,6 +310,9 @@ class AwsCliClient(S3ClientWrapper): grant_full_control: Optional[str] = None, grant_read: Optional[str] = None, ) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + if key is None: key = os.path.basename(filepath) @@ -297,6 +344,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Head object S3") def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} " @@ -315,6 +365,9 @@ class AwsCliClient(S3ClientWrapper): object_range: Optional[tuple[int, int]] = None, full_output: bool = False, ) -> dict | TestFile: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-"))) version = f" --version-id {version_id}" if version_id else "" cmd = ( @@ -329,6 +382,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get object ACL") def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} " @@ -347,6 +403,9 @@ class AwsCliClient(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: 
Optional[str] = None, ) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-object-acl --bucket {bucket} --key {key} " f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -369,6 +428,9 @@ class AwsCliClient(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -383,6 +445,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete objects S3") def delete_objects(self, bucket: str, keys: list[str]) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") delete_structure = json.dumps(_make_objs_dict(keys)) with open(file_path, "w") as out_file: @@ -399,6 +464,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete object S3") def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api delete-object --bucket {bucket} " @@ -409,6 +477,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete object versions S3") def delete_object_versions(self, bucket: str, object_versions: list) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + # Build deletion list in S3 format delete_list = { "Objects": [ @@ -435,6 +506,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete object versions S3 without delete markers") def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + # Delete objects without creating delete markers for object_version in object_versions: self.delete_object(bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"]) @@ -450,6 +524,8 @@ class AwsCliClient(S3ClientWrapper): part_number: int = 0, full_output: bool = True, ) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' attrs = ",".join(attributes) version = f" --version-id {version_id}" if version_id else "" @@ -473,6 +549,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket policy") def get_bucket_policy(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -483,6 +562,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete bucket policy") def delete_bucket_policy(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api delete-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -493,6 +575,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put bucket policy") def put_bucket_policy(self, bucket: str, policy: dict) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + # Leaving it as is was in test repo. 
Double dumps to escape resulting string # Example: # policy = {"a": 1} @@ -508,6 +593,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket cors") def get_bucket_cors(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -518,6 +606,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put bucket cors") def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -526,6 +617,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete bucket cors") def delete_bucket_cors(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -534,6 +628,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete bucket tagging") def delete_bucket_tagging(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -549,6 +646,9 @@ class AwsCliClient(S3ClientWrapper): version_id: Optional[str] = None, bypass_governance_retention: Optional[bool] = None, ) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api put-object-retention --bucket {bucket} --key {key} " @@ -566,6 +666,9 @@ class AwsCliClient(S3ClientWrapper): legal_hold_status: Literal["ON", "OFF"], version_id: Optional[str] = None, ) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" legal_hold = json.dumps({"Status": legal_hold_status}) cmd = ( @@ -576,6 +679,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put object tagging") def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} version = f" --version-id {version_id}" if version_id else "" @@ -587,6 +693,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get object tagging") def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key {key} " @@ -598,6 +707,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete object tagging") def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api delete-object-tagging 
--bucket {bucket} " @@ -613,6 +725,9 @@ class AwsCliClient(S3ClientWrapper): acl: Optional[str] = None, metadata: Optional[dict] = None, ) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) @@ -633,6 +748,9 @@ class AwsCliClient(S3ClientWrapper): acl: Optional[str] = None, metadata: Optional[dict] = None, ) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --recursive --profile {self.profile}" @@ -648,6 +766,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Create multipart upload S3") def create_multipart_upload(self, bucket: str, key: str) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " f"--key {key} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -661,6 +782,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List multipart uploads S3") def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -671,6 +795,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Abort multipart upload S3") def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} " f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -679,6 +806,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Upload part S3") def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} " f"--upload-id {upload_id} --part-number {part_num} --body {filepath} " @@ -691,6 +821,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Upload copy part S3") def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} " f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} " @@ -704,6 +837,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List parts S3") def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -717,6 +853,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Complete multipart upload S3") def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + file_path = os.path.join(os.getcwd(), ASSETS_DIR, "parts.json") 
parts_dict = {"Parts": [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]} @@ -737,6 +876,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put object lock configuration") def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -746,6 +888,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get object lock configuration") def get_object_lock_configuration(self, bucket: str): + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -756,6 +901,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put bucket lifecycle configuration") def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-bucket-lifecycle-configuration --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint} --lifecycle-configuration file://{dumped_configuration} --profile {self.profile}" @@ -766,6 +914,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket lifecycle configuration") def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-lifecycle-configuration --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -776,6 +927,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete bucket lifecycle configuration") def delete_bucket_lifecycle(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api delete-bucket-lifecycle --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" From 26139767f4118f1655c067ffa316e6ae9ebf6064 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Wed, 23 Oct 2024 14:08:54 +0300 Subject: [PATCH 215/274] [#311] Add AWS CLI command to report from Boto3 request Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/s3/boto3_client.py | 1000 +++++++++++++++--------- src/frostfs_testlib/utils/cli_utils.py | 72 +- 2 files changed, 672 insertions(+), 400 deletions(-) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index a99b866..91d8c5a 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -1,8 +1,8 @@ import json import logging import os +from collections.abc import Callable from datetime import datetime -from functools import wraps from time import sleep from typing import Literal, Optional, Union @@ -28,48 +28,32 @@ logger = logging.getLogger("NeoLogger") urllib3.disable_warnings() -def report_error(func): - @wraps(func) - def deco(*a, **kw): - try: - return func(*a, **kw) - except ClientError as err: - url = None - params = {"args": a, "kwargs": kw} - - if isinstance(a[0], Boto3ClientWrapper): - client: Boto3ClientWrapper = a[0] - url = client.s3gate_endpoint - params = {"args": a[1:], "kwargs": kw} - - 
log_command_execution(url, f"Failed {err.operation_name}", err.response, params) - raise - - return deco - - class Boto3ClientWrapper(S3ClientWrapper): __repr_name__: str = "Boto3 client" @reporter.step("Configure S3 client (boto3)") - @report_error def __init__( self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" ) -> None: self.boto3_client: S3Client = None - self.session = boto3.Session() + self.s3gate_endpoint: str = "" + + self.boto3_iam_client: S3Client = None + self.iam_endpoint: str = "" + + self.access_key_id: str = access_key_id + self.secret_access_key: str = secret_access_key + self.profile = profile self.region = region + + self.session = boto3.Session() self.config = Config( retries={ "max_attempts": MAX_REQUEST_ATTEMPTS, "mode": RETRY_MODE, } ) - self.access_key_id: str = access_key_id - self.secret_access_key: str = secret_access_key - self.s3gate_endpoint: str = "" - self.iam_endpoint: str = "" - self.boto3_iam_client: S3Client = None + self.set_endpoint(s3gate_endpoint) @reporter.step("Set endpoint S3 to {s3gate_endpoint}") @@ -116,13 +100,24 @@ class Boto3ClientWrapper(S3ClientWrapper): return result def _convert_to_s3_params(self, scope: dict, exclude: Optional[list[str]] = None) -> dict: - if not exclude: - exclude = ["self"] - return {self._to_s3_param(param): value for param, value in scope if param not in exclude and value is not None} + exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"] + return {self._to_s3_param(param): value for param, value in scope.items() if param not in exclude and value is not None} + + def _exec_request(self, method: Callable, params: Optional[dict] = None, **kwargs): + if not params: + params = {} + + try: + result = method(**params) + except ClientError as err: + log_command_execution(method.__name__, err.response, params, **kwargs) + raise + + log_command_execution(method.__name__, result, params, **kwargs) + return result # BUCKET METHODS # @reporter.step("Create bucket S3") - @report_error def create_bucket( self, bucket: Optional[str] = None, @@ -151,81 +146,98 @@ class Boto3ClientWrapper(S3ClientWrapper): if location_constraint: params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) - s3_bucket = self.boto3_client.create_bucket(**params) - log_command_execution(self.s3gate_endpoint, f"Created S3 bucket {bucket}", s3_bucket, params) + self._exec_request(self.boto3_client.create_bucket, params, endpoint=self.s3gate_endpoint, profile=self.profile) return bucket @reporter.step("List buckets S3") - @report_error def list_buckets(self) -> list[str]: - found_buckets = [] - - response = self.boto3_client.list_buckets() - log_command_execution(self.s3gate_endpoint, "S3 List buckets result", response) - - for bucket in response["Buckets"]: - found_buckets.append(bucket["Name"]) - - return found_buckets + response = self._exec_request( + self.boto3_client.list_buckets, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return [bucket["Name"] for bucket in response["Buckets"]] @reporter.step("Delete bucket S3") - @report_error def delete_bucket(self, bucket: str) -> None: - response = self.boto3_client.delete_bucket(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 Delete bucket result", response, {"Bucket": bucket}) + self._exec_request( + self.boto3_client.delete_bucket, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Head bucket 
S3") - @report_error def head_bucket(self, bucket: str) -> None: - response = self.boto3_client.head_bucket(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 Head bucket result", response, {"Bucket": bucket}) + self._exec_request( + self.boto3_client.head_bucket, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put bucket versioning status") - @report_error def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: params = {"Bucket": bucket, "VersioningConfiguration": {"Status": status.value}} - response = self.boto3_client.put_bucket_versioning(**params) - log_command_execution(self.s3gate_endpoint, "S3 Set bucket versioning to", response, params) + self._exec_request( + self.boto3_client.put_bucket_versioning, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get bucket versioning status") - @report_error def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: - response = self.boto3_client.get_bucket_versioning(Bucket=bucket) - status = response.get("Status") - log_command_execution(self.s3gate_endpoint, "S3 Got bucket versioning status", response, {"Bucket": bucket}) - return status + response = self._exec_request( + self.boto3_client.get_bucket_versioning, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("Status") @reporter.step("Put bucket tagging") - @report_error def put_bucket_tagging(self, bucket: str, tags: list) -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} - params = self._convert_to_s3_params(locals().items(), exclude=["self", "tags"]) - response = self.boto3_client.put_bucket_tagging(**params) - log_command_execution(self.s3gate_endpoint, "S3 Put bucket tagging", response, params) + params = self._convert_to_s3_params(locals(), exclude=["tags"]) + self._exec_request( + self.boto3_client.put_bucket_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get bucket tagging") - @report_error def get_bucket_tagging(self, bucket: str) -> list: - response = self.boto3_client.get_bucket_tagging(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 Get bucket tagging", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_tagging, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("TagSet") @reporter.step("Get bucket acl") - @report_error def get_bucket_acl(self, bucket: str) -> list: - response = self.boto3_client.get_bucket_acl(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 Get bucket acl", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_acl, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("Grants") @reporter.step("Delete bucket tagging") - @report_error def delete_bucket_tagging(self, bucket: str) -> None: - response = self.boto3_client.delete_bucket_tagging(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 Delete bucket tagging", response, {"Bucket": bucket}) + self._exec_request( + self.boto3_client.delete_bucket_tagging, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put bucket ACL") - @report_error def put_bucket_acl( self, bucket: str, @@ 
-233,141 +245,181 @@ class Boto3ClientWrapper(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> None: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.put_bucket_acl(**params) - log_command_execution(self.s3gate_endpoint, "S3 ACL bucket result", response, params) + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.put_bucket_acl, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put object lock configuration") - @report_error def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: params = {"Bucket": bucket, "ObjectLockConfiguration": configuration} - response = self.boto3_client.put_object_lock_configuration(**params) - log_command_execution(self.s3gate_endpoint, "S3 put_object_lock_configuration result", response, params) - return response + return self._exec_request( + self.boto3_client.put_object_lock_configuration, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get object lock configuration") - @report_error def get_object_lock_configuration(self, bucket: str) -> dict: - response = self.boto3_client.get_object_lock_configuration(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 get_object_lock_configuration result", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_object_lock_configuration, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("ObjectLockConfiguration") @reporter.step("Get bucket policy") - @report_error def get_bucket_policy(self, bucket: str) -> str: - response = self.boto3_client.get_bucket_policy(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 get_bucket_policy result", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_policy, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("Policy") @reporter.step("Delete bucket policy") - @report_error def delete_bucket_policy(self, bucket: str) -> str: - response = self.boto3_client.delete_bucket_policy(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_policy result", response, {"Bucket": bucket}) - return response + return self._exec_request( + self.boto3_client.delete_bucket_policy, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put bucket policy") - @report_error def put_bucket_policy(self, bucket: str, policy: dict) -> None: params = {"Bucket": bucket, "Policy": json.dumps(policy)} - response = self.boto3_client.put_bucket_policy(**params) - log_command_execution(self.s3gate_endpoint, "S3 put_bucket_policy result", response, params) - return response + return self._exec_request( + self.boto3_client.put_bucket_policy, + params, + # Overriding option for AWS CLI + policy=policy, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get bucket cors") - @report_error def get_bucket_cors(self, bucket: str) -> dict: - response = self.boto3_client.get_bucket_cors(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 get_bucket_cors result", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_cors, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("CORSRules") 
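# For reference, a sketch of what the parameter conversion used above does:
# _convert_to_s3_params maps a method's snake_case locals to the CamelCase
# keyword arguments boto3 expects, dropping self/cls and None values. Assuming
# _to_s3_param simply CamelCases each name (the patch does not show it), the
# behavior is roughly:
def to_s3_param(name: str) -> str:
    # e.g. "version_id" -> "VersionId", "bucket" -> "Bucket"
    return "".join(part.capitalize() for part in name.split("_"))

scope = {"self": object(), "bucket": "test-bucket", "version_id": None}
params = {to_s3_param(k): v for k, v in scope.items() if k not in ("self", "cls") and v is not None}
assert params == {"Bucket": "test-bucket"}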
@reporter.step("Get bucket location") - @report_error def get_bucket_location(self, bucket: str) -> str: - response = self.boto3_client.get_bucket_location(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 get_bucket_location result", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_location, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("LocationConstraint") @reporter.step("Put bucket cors") - @report_error def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.put_bucket_cors(**params) - log_command_execution(self.s3gate_endpoint, "S3 put_bucket_cors result", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_client.put_bucket_cors, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Delete bucket cors") - @report_error def delete_bucket_cors(self, bucket: str) -> None: - response = self.boto3_client.delete_bucket_cors(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_cors result", response, {"Bucket": bucket}) + self._exec_request( + self.boto3_client.delete_bucket_cors, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put bucket lifecycle configuration") - @report_error def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: - response = self.boto3_client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle_configuration) - log_command_execution(self.s3gate_endpoint, "S3 put_bucket_lifecycle_configuration result", response, {"Bucket": bucket}) - return response + params = self._convert_to_s3_params(locals(), exclude=["dumped_configuration"]) + return self._exec_request( + self.boto3_client.put_bucket_lifecycle_configuration, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get bucket lifecycle configuration") - @report_error def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: - response = self.boto3_client.get_bucket_lifecycle_configuration(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 get_bucket_lifecycle_configuration result", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_lifecycle_configuration, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return {"Rules": response.get("Rules")} @reporter.step("Delete bucket lifecycle configuration") - @report_error def delete_bucket_lifecycle(self, bucket: str) -> dict: - response = self.boto3_client.delete_bucket_lifecycle(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_lifecycle result", response, {"Bucket": bucket}) - return response + return self._exec_request( + self.boto3_client.delete_bucket_lifecycle, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) # END OF BUCKET METHODS # # OBJECT METHODS # @reporter.step("List objects S3 v2") - @report_error def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.list_objects_v2(Bucket=bucket) - 
log_command_execution(self.s3gate_endpoint, "S3 v2 List objects result", response, params) - + response = self._exec_request( + self.boto3_client.list_objects_v2, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) obj_list = [obj["Key"] for obj in response.get("Contents", [])] logger.info(f"Found s3 objects: {obj_list}") - return response if full_output else obj_list @reporter.step("List objects S3") - @report_error def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.list_objects(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 List objects result", response, params) - + response = self._exec_request( + self.boto3_client.list_objects, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) obj_list = [obj["Key"] for obj in response.get("Contents", [])] logger.info(f"Found s3 objects: {obj_list}") - return response if full_output else obj_list @reporter.step("List objects versions S3") - @report_error def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.list_object_versions(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 List objects versions result", response, params) + response = self._exec_request( + self.boto3_client.list_object_versions, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response if full_output else response.get("Versions", []) @reporter.step("List objects delete markers S3") - @report_error def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.list_object_versions(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 List objects delete markers result", response, params) + response = self._exec_request( + self.boto3_client.list_object_versions, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response if full_output else response.get("DeleteMarkers", []) @reporter.step("Put object S3") - @report_error def put_object( self, bucket: str, @@ -388,40 +440,53 @@ class Boto3ClientWrapper(S3ClientWrapper): with open(filepath, "rb") as put_file: body = put_file.read() - params = self._convert_to_s3_params(locals().items(), exclude=["self", "filepath", "put_file", "body"]) - response = self.boto3_client.put_object(Body=body, **params) - log_command_execution(self.s3gate_endpoint, "S3 Put object result", response, params) + params = self._convert_to_s3_params(locals(), exclude=["filepath", "put_file"]) + response = self._exec_request( + self.boto3_client.put_object, + params, + body=filepath, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("VersionId") @reporter.step("Head object S3") - @report_error def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.head_object(**params) - log_command_execution(self.s3gate_endpoint, "S3 Head object result", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_client.head_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) 
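# A note on error handling (a sketch, not part of the patch): _exec_request
# logs a failed call via log_command_execution and then re-raises the original
# botocore ClientError, so callers keep catching boto3 errors exactly as they
# did with the removed @report_error decorator. Assuming a configured wrapper
# instance named "s3":
from botocore.exceptions import ClientError

try:
    s3.head_object("absent-bucket", "absent-key")
except ClientError as err:
    # the failed request and its parameters were already logged by _exec_request
    print(err.response["Error"]["Code"])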
@reporter.step("Delete object S3") - @report_error def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.delete_object(**params) - log_command_execution(self.s3gate_endpoint, "S3 Delete object result", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_client.delete_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Delete objects S3") - @report_error def delete_objects(self, bucket: str, keys: list[str]) -> dict: params = {"Bucket": bucket, "Delete": _make_objs_dict(keys)} - response = self.boto3_client.delete_objects(**params) - log_command_execution(self.s3gate_endpoint, "S3 Delete objects result", response, params) + response = self._exec_request( + self.boto3_client.delete_objects, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + assert ( "Errors" not in response ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' + return response @reporter.step("Delete object versions S3") - @report_error def delete_object_versions(self, bucket: str, object_versions: list) -> dict: # Build deletion list in S3 format delete_list = { @@ -434,21 +499,26 @@ class Boto3ClientWrapper(S3ClientWrapper): ] } params = {"Bucket": bucket, "Delete": delete_list} - response = self.boto3_client.delete_objects(**params) - log_command_execution(self.s3gate_endpoint, "S3 Delete objects result", response, params) - return response + return self._exec_request( + self.boto3_client.delete_objects, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Delete object versions S3 without delete markers") - @report_error def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: # Delete objects without creating delete markers for object_version in object_versions: params = {"Bucket": bucket, "Key": object_version["Key"], "VersionId": object_version["VersionId"]} - response = self.boto3_client.delete_object(**params) - log_command_execution(self.s3gate_endpoint, "S3 Delete object result", response, params) + self._exec_request( + self.boto3_client.delete_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put object ACL") - @report_error def put_object_acl( self, bucket: str, @@ -457,21 +527,27 @@ class Boto3ClientWrapper(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> list: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.put_object_acl(**params) - log_command_execution(self.s3gate_endpoint, "S3 put object ACL", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.put_object_acl, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("Grants") @reporter.step("Get object ACL") - @report_error def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.get_object_acl(**params) - log_command_execution(self.s3gate_endpoint, "S3 ACL objects result", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + 
self.boto3_client.get_object_acl, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("Grants") @reporter.step("Copy object S3") - @report_error def copy_object( self, source_bucket: str, @@ -486,17 +562,22 @@ class Boto3ClientWrapper(S3ClientWrapper): ) -> str: if bucket is None: bucket = source_bucket + if key is None: key = string_utils.unique_name("copy-object-") - copy_source = f"{source_bucket}/{source_key}" - params = self._convert_to_s3_params(locals().items(), exclude=["self", "source_bucket", "source_key"]) - response = self.boto3_client.copy_object(**params) - log_command_execution(self.s3gate_endpoint, "S3 Copy objects result", response, params) + copy_source = f"{source_bucket}/{source_key}" + params = self._convert_to_s3_params(locals(), exclude=["source_bucket", "source_key"]) + + self._exec_request( + self.boto3_client.copy_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return key @reporter.step("Get object S3") - @report_error def get_object( self, bucket: str, @@ -509,12 +590,15 @@ class Boto3ClientWrapper(S3ClientWrapper): if object_range: range_str = f"bytes={object_range[0]}-{object_range[1]}" - params = self._convert_to_s3_params( - {**locals(), **{"Range": range_str}}.items(), - exclude=["self", "object_range", "full_output", "range_str"], + params = locals() + params.update({"Range": f"bytes={object_range[0]}-{object_range[1]}"} if object_range else {}) + params = self._convert_to_s3_params(params, exclude=["object_range", "full_output", "range_str"]) + response = self._exec_request( + self.boto3_client.get_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, ) - response = self.boto3_client.get_object(**params) - log_command_execution(self.s3gate_endpoint, "S3 Get objects result", response, params) if full_output: return response @@ -528,78 +612,93 @@ class Boto3ClientWrapper(S3ClientWrapper): return test_file @reporter.step("Create multipart upload S3") - @report_error def create_multipart_upload(self, bucket: str, key: str) -> str: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.create_multipart_upload(**params) - log_command_execution(self.s3gate_endpoint, "S3 Created multipart upload", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.create_multipart_upload, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" - return response["UploadId"] @reporter.step("List multipart uploads S3") - @report_error def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: - response = self.boto3_client.list_multipart_uploads(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 List multipart upload", response, {"Bucket": bucket}) - + response = self._exec_request( + self.boto3_client.list_multipart_uploads, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("Uploads") @reporter.step("Abort multipart upload S3") - @report_error def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.abort_multipart_upload(**params) - log_command_execution(self.s3gate_endpoint, "S3 Abort multipart upload", response, params) + params = self._convert_to_s3_params(locals()) + self._exec_request( + 
self.boto3_client.abort_multipart_upload, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Upload part S3") - @report_error def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: with open(filepath, "rb") as put_file: body = put_file.read() - params = self._convert_to_s3_params(locals().items(), exclude=["self", "put_file", "part_num", "filepath", "body"]) + params = self._convert_to_s3_params(locals(), exclude=["put_file", "part_num", "filepath"]) params["PartNumber"] = part_num - response = self.boto3_client.upload_part(Body=body, **params) - log_command_execution(self.s3gate_endpoint, "S3 Upload part", response, params) - assert response.get("ETag"), f"Expected ETag in response:\n{response}" + response = self._exec_request( + self.boto3_client.upload_part, + params, + body=filepath, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + assert response.get("ETag"), f"Expected ETag in response:\n{response}" return response["ETag"] @reporter.step("Upload copy part S3") - @report_error def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: - params = self._convert_to_s3_params(locals().items(), exclude=["self", "put_file", "part_num", "filepath"]) + params = self._convert_to_s3_params(locals(), exclude=["put_file", "part_num", "filepath"]) params["PartNumber"] = part_num - response = self.boto3_client.upload_part_copy(**params) - log_command_execution(self.s3gate_endpoint, "S3 Upload copy part", response, params) + response = self._exec_request( + self.boto3_client.upload_part_copy, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" - return response["CopyPartResult"]["ETag"] @reporter.step("List parts S3") - @report_error def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.list_parts(**params) - log_command_execution(self.s3gate_endpoint, "S3 List part", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.list_parts, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) assert response.get("Parts"), f"Expected Parts in response:\n{response}" - return response["Parts"] @reporter.step("Complete multipart upload S3") - @report_error def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] - params = self._convert_to_s3_params(locals().items(), exclude=["self", "parts"]) + params = self._convert_to_s3_params(locals(), exclude=["parts"]) params["MultipartUpload"] = {"Parts": parts} - response = self.boto3_client.complete_multipart_upload(**params) - log_command_execution(self.s3gate_endpoint, "S3 Complete multipart upload", response, params) - - return response + return self._exec_request( + self.boto3_client.complete_multipart_upload, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put object retention") - @report_error def put_object_retention( self, bucket: str, @@ -608,12 +707,15 @@ class Boto3ClientWrapper(S3ClientWrapper): version_id: Optional[str] = None, bypass_governance_retention: Optional[bool] = None, ) -> None: - params = self._convert_to_s3_params(locals().items()) - response = 
self.boto3_client.put_object_retention(**params) - log_command_execution(self.s3gate_endpoint, "S3 Put object retention ", response, params) + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.put_object_retention, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put object legal hold") - @report_error def put_object_legal_hold( self, bucket: str, @@ -622,36 +724,48 @@ class Boto3ClientWrapper(S3ClientWrapper): version_id: Optional[str] = None, ) -> None: legal_hold = {"Status": legal_hold_status} - params = self._convert_to_s3_params(locals().items(), exclude=["self", "legal_hold_status"]) - response = self.boto3_client.put_object_legal_hold(**params) - log_command_execution(self.s3gate_endpoint, "S3 Put object legal hold ", response, params) + params = self._convert_to_s3_params(locals(), exclude=["legal_hold_status"]) + self._exec_request( + self.boto3_client.put_object_legal_hold, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put object tagging") - @report_error def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} - params = self._convert_to_s3_params(locals().items(), exclude=["self", "tags"]) - response = self.boto3_client.put_object_tagging(**params) - log_command_execution(self.s3gate_endpoint, "S3 Put object tagging", response, params) + params = self._convert_to_s3_params(locals(), exclude=["tags"]) + self._exec_request( + self.boto3_client.put_object_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get object tagging") - @report_error def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.get_object_tagging(**params) - log_command_execution(self.s3gate_endpoint, "S3 Get object tagging", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.get_object_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("TagSet") @reporter.step("Delete object tagging") - @report_error def delete_object_tagging(self, bucket: str, key: str) -> None: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.delete_object_tagging(**params) - log_command_execution(self.s3gate_endpoint, "S3 Delete object tagging", response, params) + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.delete_object_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get object attributes") - @report_error def get_object_attributes( self, bucket: str, @@ -666,7 +780,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return {} @reporter.step("Sync directory S3") - @report_error def sync( self, bucket: str, @@ -677,7 +790,6 @@ class Boto3ClientWrapper(S3ClientWrapper): raise NotImplementedError("Sync is not supported for boto3 client") @reporter.step("CP directory S3") - @report_error def cp( self, bucket: str, @@ -693,36 +805,47 @@ class Boto3ClientWrapper(S3ClientWrapper): # Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.) 
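# For instance, a successful delete-style call typically returns only response
# metadata, so there is nothing in the body worth asserting on (a sketch;
# "s3" is an assumed, configured wrapper instance and the ARN is made up):
response = s3.iam_delete_policy("arn:aws:iam::aws:policy/test-policy")
# response is typically just {"ResponseMetadata": {"HTTPStatusCode": 200, ...}}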
@reporter.step("Adds the specified user to the specified group") - @report_error def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.add_user_to_group(**params) - log_command_execution(self.iam_endpoint, "IAM Add User to Group", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.add_user_to_group, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Attaches the specified managed policy to the specified IAM group") - @report_error def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.attach_group_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Attach Group Policy", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.attach_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Attaches the specified managed policy to the specified user") - @report_error def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.attach_user_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Attach User Policy", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.attach_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") - @report_error def iam_create_access_key(self, user_name: str) -> dict: - response = self.boto3_iam_client.create_access_key(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM Create Access Key", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.create_access_key, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) access_key_id = response["AccessKey"].get("AccessKeyId") secret_access_key = response["AccessKey"].get("SecretAccessKey") @@ -732,10 +855,13 @@ class Boto3ClientWrapper(S3ClientWrapper): return access_key_id, secret_access_key @reporter.step("Creates a new group") - @report_error def iam_create_group(self, group_name: str) -> dict: - response = self.boto3_iam_client.create_group(GroupName=group_name) - log_command_execution(self.iam_endpoint, "IAM Create Group", response, {"GroupName": group_name}) + response = self._exec_request( + self.boto3_iam_client.create_group, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Group"), f"Expected Group in response:\n{response}" assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" @@ -743,12 +869,17 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Creates a new managed policy for your AWS account") - @report_error def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: - params = self._convert_to_s3_params(locals().items()) + params = self._convert_to_s3_params(locals()) params["PolicyDocument"] = 
json.dumps(policy_document) - response = self.boto3_iam_client.create_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Create Policy", response, params) + response = self._exec_request( + self.boto3_iam_client.create_policy, + params, + # Overriding option for AWS CLI + policy_document=policy_document, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Policy"), f"Expected Policy in response:\n{response}" assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" @@ -756,10 +887,13 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Creates a new IAM user for your AWS account") - @report_error def iam_create_user(self, user_name: str) -> dict: - response = self.boto3_iam_client.create_user(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM Create User", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.create_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("User"), f"Expected User in response:\n{response}" assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" @@ -767,89 +901,115 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Deletes the access key pair associated with the specified IAM user") - @report_error def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.delete_access_key(**params) - log_command_execution(self.iam_endpoint, "IAM Delete Access Key", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.delete_access_key, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Deletes the specified IAM group") - @report_error def iam_delete_group(self, group_name: str) -> dict: - response = self.boto3_iam_client.delete_group(GroupName=group_name) - log_command_execution(self.iam_endpoint, "IAM Delete Group", response, {"GroupName": group_name}) - return response + return self._exec_request( + self.boto3_iam_client.delete_group, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") - @report_error def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.delete_group_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Delete Group Policy", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.delete_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Deletes the specified managed policy") - @report_error def iam_delete_policy(self, policy_arn: str) -> dict: - response = self.boto3_iam_client.delete_policy(PolicyArn=policy_arn) - log_command_execution(self.iam_endpoint, "IAM Delete Policy", response, {"PolicyArn": policy_arn}) - return response + return self._exec_request( + self.boto3_iam_client.delete_policy, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Deletes the specified IAM user") - @report_error 
def iam_delete_user(self, user_name: str) -> dict: - response = self.boto3_iam_client.delete_user(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM Delete User", response, {"UserName": user_name}) - return response + return self._exec_request( + self.boto3_iam_client.delete_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") - @report_error def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.delete_user_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Delete User Policy", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.delete_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Removes the specified managed policy from the specified IAM group") - @report_error def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.detach_group_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Detach Group Policy", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.detach_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Removes the specified managed policy from the specified user") - @report_error def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.detach_user_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Detach User Policy", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.detach_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Returns a list of IAM users that are in the specified IAM group") - @report_error def iam_get_group(self, group_name: str) -> dict: - response = self.boto3_iam_client.get_group(GroupName=group_name) - log_command_execution(self.iam_endpoint, "IAM Get Group", response, {"GroupName": group_name}) + response = self._exec_request( + self.boto3_iam_client.get_group, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}" return response @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") - @report_error def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.get_group_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Get Group Policy", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.get_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Retrieves information about the specified managed policy") - @report_error def 
iam_get_policy(self, policy_arn: str) -> dict: - response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn) - log_command_execution(self.iam_endpoint, "IAM Get Policy", response, {"PolicyArn": policy_arn}) + response = self._exec_request( + self.boto3_iam_client.get_policy, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Policy"), f"Expected Policy in response:\n{response}" assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" @@ -857,11 +1017,14 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Retrieves information about the specified version of the specified managed policy") - @report_error def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.get_policy_version(**params) - log_command_execution(self.iam_endpoint, "IAM Get Policy Version", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.get_policy_version, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}" assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}" @@ -869,10 +1032,13 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Retrieves information about the specified IAM user") - @report_error def iam_get_user(self, user_name: str) -> dict: - response = self.boto3_iam_client.get_user(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM Get User", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.get_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("User"), f"Expected User in response:\n{response}" assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" @@ -880,42 +1046,56 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") - @report_error def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.get_user_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Get User Policy", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.get_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("UserName"), f"Expected UserName in response:\n{response}" return response @reporter.step("Returns information about the access key IDs associated with the specified IAM user") - @report_error def iam_list_access_keys(self, user_name: str) -> dict: - response = self.boto3_iam_client.list_access_keys(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM List Access Keys", response, {"UserName": user_name}) - return response + return self._exec_request( + self.boto3_iam_client.list_access_keys, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Lists all managed policies that are attached to the specified IAM group") - @report_error def 
iam_list_attached_group_policies(self, group_name: str) -> dict: - response = self.boto3_iam_client.list_attached_group_policies(GroupName=group_name) - log_command_execution(self.iam_endpoint, "IAM List Attached Group Policies", response, {"GroupName": group_name}) + response = self._exec_request( + self.boto3_iam_client.list_attached_group_policies, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" return response @reporter.step("Lists all managed policies that are attached to the specified IAM user") - @report_error def iam_list_attached_user_policies(self, user_name: str) -> dict: - response = self.boto3_iam_client.list_attached_user_policies(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM List Attached User Policies", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.list_attached_user_policies, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" return response @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") - @report_error def iam_list_entities_for_policy(self, policy_arn: str) -> dict: - response = self.boto3_iam_client.list_entities_for_policy(PolicyArn=policy_arn) - log_command_execution(self.iam_endpoint, "IAM List Entities For Policy", response, {"PolicyArn": policy_arn}) + response = self._exec_request( + self.boto3_iam_client.list_entities_for_policy, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" @@ -923,125 +1103,165 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") - @report_error def iam_list_group_policies(self, group_name: str) -> dict: - response = self.boto3_iam_client.list_group_policies(GroupName=group_name) - log_command_execution(self.iam_endpoint, "IAM List Group Policies", response, {"GroupName": group_name}) + response = self._exec_request( + self.boto3_iam_client.list_group_policies, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" return response @reporter.step("Lists the IAM groups") - @report_error def iam_list_groups(self) -> dict: - response = self.boto3_iam_client.list_groups() - log_command_execution(self.iam_endpoint, "IAM List Groups", response) + response = self._exec_request( + self.boto3_iam_client.list_groups, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Groups"), f"Expected Groups in response:\n{response}" return response @reporter.step("Lists the IAM groups that the specified IAM user belongs to") - @report_error def iam_list_groups_for_user(self, user_name: str) -> dict: - response = self.boto3_iam_client.list_groups_for_user(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM List Groups For User", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.list_groups_for_user, + params={"UserName": user_name}, + 
endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Groups"), f"Expected Groups in response:\n{response}" return response @reporter.step("Lists all the managed policies that are available in your AWS account") - @report_error def iam_list_policies(self) -> dict: - response = self.boto3_iam_client.list_policies() - log_command_execution(self.iam_endpoint, "IAM List Policies", response) + response = self._exec_request( + self.boto3_iam_client.list_policies, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Policies"), f"Expected Policies in response:\n{response}" return response @reporter.step("Lists information about the versions of the specified managed policy") - @report_error def iam_list_policy_versions(self, policy_arn: str) -> dict: - response = self.boto3_iam_client.list_policy_versions(PolicyArn=policy_arn) - log_command_execution(self.iam_endpoint, "IAM List Policy Versions", response, {"PolicyArn": policy_arn}) + response = self._exec_request( + self.boto3_iam_client.list_policy_versions, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Versions"), f"Expected Versions in response:\n{response}" return response @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") - @report_error def iam_list_user_policies(self, user_name: str) -> dict: - response = self.boto3_iam_client.list_user_policies(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM List User Policies", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.list_user_policies, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" return response @reporter.step("Lists the IAM users") - @report_error def iam_list_users(self) -> dict: - response = self.boto3_iam_client.list_users() - log_command_execution(self.iam_endpoint, "IAM List Users", response) + response = self._exec_request( + self.boto3_iam_client.list_users, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Users"), f"Expected Users in response:\n{response}" return response @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") - @report_error def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: - params = self._convert_to_s3_params(locals().items()) + params = self._convert_to_s3_params(locals()) params["PolicyDocument"] = json.dumps(policy_document) - response = self.boto3_iam_client.put_group_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Put Group Policy", response, params) + response = self._exec_request( + self.boto3_iam_client.put_group_policy, + params, + # Overriding option for AWS CLI + policy_document=policy_document, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") - @report_error def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: - params = self._convert_to_s3_params(locals().items()) + params = self._convert_to_s3_params(locals()) params["PolicyDocument"] = json.dumps(policy_document) - response = self.boto3_iam_client.put_user_policy(**params) - log_command_execution(self.iam_endpoint, 
"IAM Put User Policy", response, params) + response = self._exec_request( + self.boto3_iam_client.put_user_policy, + params, + # Overriding option for AWS CLI + policy_document=policy_document, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Removes the specified user from the specified group") - @report_error def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.remove_user_from_group(**params) - log_command_execution(self.iam_endpoint, "IAM Remove User From Group", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.remove_user_from_group, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Updates the name and/or the path of the specified IAM group") - @report_error def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict: params = {"GroupName": group_name, "NewGroupName": new_name, "NewPath": "/"} - response = self.boto3_iam_client.update_group(**params) - log_command_execution(self.iam_endpoint, "IAM Update Group", response, params) - return response + return self._exec_request( + self.boto3_iam_client.update_group, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Updates the name and/or the path of the specified IAM user") - @report_error def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict: params = {"UserName": user_name, "NewUserName": new_name, "NewPath": "/"} - response = self.boto3_iam_client.update_user(**params) - log_command_execution(self.iam_endpoint, "IAM Update User", response, params) - return response + return self._exec_request( + self.boto3_iam_client.update_user, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Adds one or more tags to an IAM user") - @report_error def iam_tag_user(self, user_name: str, tags: list) -> dict: - params = self._convert_to_s3_params(locals().items()) + params = self._convert_to_s3_params(locals()) params["Tags"] = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - response = self.boto3_iam_client.tag_user(**params) - log_command_execution(self.iam_endpoint, "IAM Tag User", response, params) - return response + return self._exec_request( + self.boto3_iam_client.tag_user, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("List tags of IAM user") - @report_error def iam_list_user_tags(self, user_name: str) -> dict: - response = self.boto3_iam_client.list_user_tags(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM List User Tags", response, {"UserName": user_name}) - return response + return self._exec_request( + self.boto3_iam_client.list_user_tags, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Removes the specified tags from the user") - @report_error def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.untag_user(**params) - log_command_execution(self.iam_endpoint, "IAM Untag User", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.untag_user, + params, + 
endpoint=self.iam_endpoint,
+            profile=self.profile,
+        )

diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py
index 8e019ea..32e4346 100644
--- a/src/frostfs_testlib/utils/cli_utils.py
+++ b/src/frostfs_testlib/utils/cli_utils.py
@@ -9,13 +9,12 @@ import csv
 import json
 import logging
 import re
-import subprocess
 import sys
 from contextlib import suppress
 from datetime import datetime
 from io import StringIO
 from textwrap import shorten
-from typing import Dict, List, Optional, TypedDict, Union
+from typing import Any, Optional, Union
 
 import pexpect
 
@@ -75,22 +74,75 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date
     reporter.attach(command_attachment, "Command execution")
 
 
-def log_command_execution(url: str, cmd: str, output: Union[str, dict], params: Optional[dict] = None) -> None:
+def log_command_execution(cmd: str, output: Union[str, dict], params: Optional[dict] = None, **kwargs) -> None:
     logger.info(f"{cmd}: {output}")
 
-    with suppress(Exception):
-        json_output = json.dumps(output, indent=4, sort_keys=True)
-        output = json_output
+    if not params:
+        params = {}
+
+    output_params = params
 
     try:
-        json_params = json.dumps(params, indent=4, sort_keys=True)
+        json_params = json.dumps(params, indent=4, sort_keys=True, default=str)
     except TypeError as err:
         logger.warning(f"Failed to serialize '{cmd}' request parameters:\n{params}\nException: {err}")
     else:
-        params = json_params
+        output_params = json_params
 
-    command_attachment = f"COMMAND: '{cmd}'\n" f"URL: {url}\n" f"PARAMS:\n{params}\n" f"OUTPUT:\n{output}\n"
-    reporter.attach(command_attachment, "Command execution")
+    output = json.dumps(output, indent=4, sort_keys=True, default=str)
+
+    command_execution = f"COMMAND: '{cmd}'\n" f"URL: {kwargs['endpoint']}\n" f"PARAMS:\n{output_params}\n" f"OUTPUT:\n{output}\n"
+    aws_command = _convert_request_to_aws_cli_command(cmd, params, **kwargs)
+
+    reporter.attach(command_execution, "Command execution")
+    reporter.attach(aws_command, "AWS CLI Command")
+
+
+def _convert_request_to_aws_cli_command(command: str, params: dict, **kwargs) -> str:
+    overridden_names = [_convert_json_name_to_aws_cli(name) for name in kwargs.keys()]
+    command = command.replace("_", "-")
+    options = []
+
+    for name, value in params.items():
+        name = _convert_json_name_to_aws_cli(name)
+
+        # To override parameters for AWS CLI
+        if name in overridden_names:
+            continue
+
+        if option := _create_option(name, value):
+            options.append(option)
+
+    for name, value in kwargs.items():
+        name = _convert_json_name_to_aws_cli(name)
+        if option := _create_option(name, value):
+            options.append(option)
+
+    options = " ".join(options)
+    api = "s3api" if "s3" in kwargs["endpoint"] else "iam"
+    return f"aws --no-verify-ssl --no-paginate {api} {command} {options}"
+
+
+def _convert_json_name_to_aws_cli(name: str) -> str:
+    specific_names = {"CORSConfiguration": "cors-configuration"}
+
+    if aws_cli_name := specific_names.get(name):
+        return aws_cli_name
+    return re.sub(r"([a-z])([A-Z])", r"\1 \2", name).lower().replace(" ", "-").replace("_", "-")
+
+
+def _create_option(name: str, value: Any) -> str | None:
+    if isinstance(value, bool) and value:
+        return f"--{name}"
+
+    if isinstance(value, dict):
+        value = json.dumps(value, indent=4, sort_keys=True, default=str)
+        return f"--{name} '{value}'"
+
+    if value:
+        return f"--{name} {value}"
+
+    return None
 
 
 def parse_netmap_output(output: str) -> list[NodeNetmapInfo]:

From 6f1baf3cf6384f7adeb300bd9d6c9406f4abdcf3 Mon Sep 17 
00:00:00 2001 From: Ekaterina Chernitsyna Date: Fri, 1 Nov 2024 15:50:17 +0300 Subject: [PATCH 216/274] [#312] update morph remove_nodes --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index 7228692..2958884 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -341,7 +341,6 @@ class FrostfsAdmMorph(CliCommand): return self._execute( f"morph remove-nodes {' '.join(node_netmap_keys)}", **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, - **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, ) def add_rule( From ea4094051413cf49b74277794a0e3b99221d05f6 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Tue, 5 Nov 2024 12:37:56 +0300 Subject: [PATCH 217/274] [#313] update force_new_epoch --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index 2958884..f3e0137 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -122,7 +122,9 @@ class FrostfsAdmMorph(CliCommand): **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) - def force_new_epoch(self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult: + def force_new_epoch( + self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None + ) -> CommandResult: """Create new FrostFS epoch event in the side chain. 
Args:

From 55d8ee5da0cc7113fe864ebfadd028234891bf98 Mon Sep 17 00:00:00 2001
From: Kirill Sosnovskikh
Date: Fri, 8 Nov 2024 15:46:02 +0300
Subject: [PATCH 218/274] [#315] Add http client

Signed-off-by: Kirill Sosnovskikh
---
 src/frostfs_testlib/http/__init__.py    |  0
 src/frostfs_testlib/http/http_client.py | 95 +++++++++++++++++++++++++
 2 files changed, 95 insertions(+)
 create mode 100644 src/frostfs_testlib/http/__init__.py
 create mode 100644 src/frostfs_testlib/http/http_client.py

diff --git a/src/frostfs_testlib/http/__init__.py b/src/frostfs_testlib/http/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py
new file mode 100644
index 0000000..261b2a6
--- /dev/null
+++ b/src/frostfs_testlib/http/http_client.py
@@ -0,0 +1,95 @@
+import json
+import logging
+import logging.config
+
+import httpx
+
+from frostfs_testlib import reporter
+
+timeout = httpx.Timeout(60, read=150)
+LOGGING_CONFIG = {
+    "disable_existing_loggers": False,
+    "version": 1,
+    "handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}},
+    "formatters": {
+        "http": {
+            "format": "%(levelname)s [%(asctime)s] %(name)s - %(message)s",
+            "datefmt": "%Y-%m-%d %H:%M:%S",
+        }
+    },
+    "loggers": {
+        "httpx": {
+            "handlers": ["default"],
+            "level": "DEBUG",
+        },
+        "httpcore": {
+            "handlers": ["default"],
+            "level": "ERROR",
+        },
+    },
+}
+
+logging.config.dictConfig(LOGGING_CONFIG)
+logger = logging.getLogger("NeoLogger")
+
+
+class HttpClient:
+    @reporter.step("Send {method} request to {url}")
+    def send(self, method: str, url: str, expected_status_code: int | None = None, **kwargs) -> httpx.Response:
+        transport = httpx.HTTPTransport(verify=False, retries=5)
+        client = httpx.Client(timeout=timeout, transport=transport)
+        response = client.request(method, url, **kwargs)
+
+        self._attach_response(response)
+        logger.info(f"Response: {response.status_code} => {response.text}")
+
+        if expected_status_code:
+            assert response.status_code == expected_status_code, (
+                f"Got {response.status_code} response code" f" while {expected_status_code} expected"
+            )
+
+        return response
+
+    def _attach_response(self, response: httpx.Response):
+        request = response.request
+
+        try:
+            request_headers = json.dumps(dict(request.headers), indent=4)
+        except TypeError:
+            request_headers = str(request.headers)
+
+        try:
+            request_body = request.read()
+            try:
+                request_body = request_body.decode("utf-8")
+            except UnicodeDecodeError as e:
+                request_body = f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}"
+        except Exception as e:
+            request_body = f"Error reading request body: {str(e)}"
+
+        request_body = "" if request_body is None else request_body
+
+        try:
+            response_headers = json.dumps(dict(response.headers), indent=4)
+        except TypeError:
+            response_headers = str(response.headers)
+
+        report = (
+            f"Method: {request.method}\n\n"
+            f"URL: {request.url}\n\n"
+            f"Request Headers: {request_headers}\n\n"
+            f"Request Body: {request_body}\n\n"
+            f"Response Status Code: {response.status_code}\n\n"
+            f"Response Headers: {response_headers}\n\n"
+            f"Response Body: {response.text}\n\n"
+        )
+        curl_request = self._create_curl_request(request.url, request.method, request.headers, request_body)
+
+        reporter.attach(report, "Requests Info")
+        reporter.attach(curl_request, "CURL")
+
+    def _create_curl_request(self, url: str, method: str, headers: httpx.Headers, data: str) -> str:
+        headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items())
+        data = f" -d '{data}'" if data else ""
+        # Option -k disables SSL certificate verification
+        return f"curl {url} -X {method} {headers}{data} -k"

From 95b32a036a8043191f3cec6dd249ee95fa1aa3a6 Mon Sep 17 00:00:00 2001
From: "a.berezin"
Date: Tue, 12 Nov 2024 12:28:10 +0300
Subject: [PATCH 219/274] [#316] Extend parallel exception message output

Signed-off-by: a.berezin
---
 src/frostfs_testlib/testing/parallel.py | 38 ++++++++++++++++++++++++-
 1 file changed, 37 insertions(+), 1 deletion(-)

diff --git a/src/frostfs_testlib/testing/parallel.py b/src/frostfs_testlib/testing/parallel.py
index 0549e61..6c4f6e0 100644
--- a/src/frostfs_testlib/testing/parallel.py
+++ b/src/frostfs_testlib/testing/parallel.py
@@ -1,4 +1,5 @@
 import itertools
+import traceback
 from concurrent.futures import Future, ThreadPoolExecutor
 from contextlib import contextmanager
 from typing import Callable, Collection, Optional, Union
@@ -55,7 +56,42 @@ def parallel(
     # Check for exceptions
     exceptions = [future.exception() for future in futures if future.exception()]
     if exceptions:
-        message = "\n".join([str(e) for e in exceptions])
+        # Prettify exception in parallel with all underlying stack traces
+        # For example, we had 3 RuntimeError exceptions during parallel. This format will give us something like
+        #
+        # RuntimeError: The following exceptions occurred during parallel run:
+        # 1) Exception one text
+        # 2) Exception two text
+        # 3) Exception three text
+        # TRACES:
+        # ==== 1 ====
+        # Traceback (most recent call last):
+        #   File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run
+        #     result = self.fn(*self.args, **self.kwargs)
+        #   File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service
+        #     raise RuntimeError(f"Exception one text")
+        # RuntimeError: Exception one text
+        #
+        # ==== 2 ====
+        # Traceback (most recent call last):
+        #   File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run
+        #     result = self.fn(*self.args, **self.kwargs)
+        #   File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service
+        #     raise RuntimeError(f"Exception two text")
+        # RuntimeError: Exception two text
+        #
+        # ==== 3 ====
+        # Traceback (most recent call last):
+        #   File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run
+        #     result = self.fn(*self.args, **self.kwargs)
+        #   File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service
+        #     raise RuntimeError(f"Exception three text")
+        # RuntimeError: Exception three text
+        short_summary = "\n".join([f"{i}) {str(e)}" for i, e in enumerate(exceptions, 1)])
+        stack_traces = "\n".join(
+            [f"==== {i} ====\n{''.join(traceback.TracebackException.from_exception(e).format())}" for i, e in enumerate(exceptions, 1)]
+        )
+        message = f"{short_summary}\nTRACES:\n{stack_traces}"
         raise RuntimeError(f"The following exceptions occurred during parallel run:\n{message}")
 
     return futures

From 2a90ec74ff70934d65fa13e78c348afda3b195c2 Mon Sep 17 00:00:00 2001
From: Ekaterina Chernitsyna
Date: Tue, 12 Nov 2024 16:01:12 +0300
Subject: [PATCH 220/274] [#317] update morph rule chain

---
 src/frostfs_testlib/cli/frostfs_adm/morph.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py
index f3e0137..5e39cf4 100644
--- a/src/frostfs_testlib/cli/frostfs_adm/morph.py
+++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py
@@ -353,6 +353,7 @@ class FrostfsAdmMorph(CliCommand):
         rule: Optional[list[str]] = None,
         path: Optional[str] = None,
         chain_id_hex: Optional[bool] = None,
+        chain_name: Optional[str] = None,
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         timeout: Optional[str] = None,
@@ -383,6 +384,7 @@ class FrostfsAdmMorph(CliCommand):
         target_name: str,
         target_type: str,
         chain_id_hex: Optional[bool] = None,
+        chain_name: Optional[str] = None,
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         timeout: Optional[str] = None,
@@ -410,6 +412,7 @@ class FrostfsAdmMorph(CliCommand):
         target_type: str,
         target_name: Optional[str] = None,
         rpc_endpoint: Optional[str] = None,
+        chain_name: Optional[str] = None,
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         timeout: Optional[str] = None,
@@ -436,6 +439,7 @@ class FrostfsAdmMorph(CliCommand):
         target_name: str,
         target_type: str,
         all: Optional[bool] = None,
+        chain_name: Optional[str] = None,
         chain_id_hex: Optional[bool] = None,
         wallet: Optional[str] = None,
         address: Optional[str] = None,

From 47bc11835bb7869e2b87e761e432e923fcd90343 Mon Sep 17 00:00:00 2001
From: "a.berezin"
Date: Wed, 13 Nov 2024 10:10:35 +0300
Subject: [PATCH 221/274] [#318] Add tombstone expiration test

Signed-off-by: a.berezin
---
 src/frostfs_testlib/hosting/docker_host.py    |  3 ++
 src/frostfs_testlib/hosting/interfaces.py     | 11 +++++++
 src/frostfs_testlib/resources/common.py       |  1 +
 src/frostfs_testlib/storage/cluster.py        |  6 ++--
 .../controllers/cluster_state_controller.py   | 21 ++++++++++++--
 .../state_managers/config_state_manager.py    | 29 ++++++++++++++-----
 .../storage/dataclasses/node_base.py          | 12 ++++----
 7 files changed, 63 insertions(+), 20 deletions(-)

diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py
index 5110e63..01dc6b5 100644
--- a/src/frostfs_testlib/hosting/docker_host.py
+++ b/src/frostfs_testlib/hosting/docker_host.py
@@ -164,6 +164,9 @@ class DockerHost(Host):
 
         return volume_path
 
+    def send_signal_to_service(self, service_name: str, signal: str) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
     def delete_metabase(self, service_name: str) -> None:
         raise NotImplementedError("Not implemented for docker")
 
diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py
index b84326a..6d1e5da 100644
--- a/src/frostfs_testlib/hosting/interfaces.py
+++ b/src/frostfs_testlib/hosting/interfaces.py
@@ -117,6 +117,17 @@ class Host(ABC):
             service_name: Name of the service to stop.
         """
 
+    @abstractmethod
+    def send_signal_to_service(self, service_name: str, signal: str) -> None:
+        """Send a signal to the service with the specified name using `kill -<signal>`.
+
+        The service must be hosted on this host.
+
+        Args:
+            service_name: Name of the service to send the signal to.
+            signal: Signal name. See `kill -l` for all names.
+        """
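For non-docker hosts, one plausible way to satisfy this new contract on a systemd-managed service is a `kill` over the remote shell. This is only a hedged sketch of a possible implementation in a concrete `Host` subclass, not the project's actual code:

    # Illustrative sketch for a systemd-based Host subclass.
    def send_signal_to_service(self, service_name: str, signal: str) -> None:
        shell = self.get_shell()
        # Resolve the main PID of the unit, then deliver the signal (e.g. SIGHUP).
        pid = shell.exec(f"systemctl show --property MainPID --value {service_name}").stdout.strip()
        shell.exec(f"sudo kill -{signal} {pid}")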
diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index 1c93b12..53bcfaa 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -53,3 +53,4 @@ HOSTING_CONFIG_FILE = os.getenv( ) MORE_LOG = os.getenv("MORE_LOG", "1") +EXPIRATION_EPOCH_ATTRIBUTE = "__SYSTEM__EXPIRATION_EPOCH" diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 9fcc4c9..3ec4922 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -11,10 +11,10 @@ from frostfs_testlib.storage import get_service_registry from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode +from frostfs_testlib.storage.dataclasses.metrics import Metrics from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.service_registry import ServiceRegistry -from frostfs_testlib.storage.dataclasses.metrics import Metrics class ClusterNode: @@ -91,10 +91,10 @@ class ClusterNode: config_str = yaml.dump(new_config) shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}") - def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml: + def config(self, service_type: ServiceClass) -> ServiceConfigurationYml: return self.service(service_type).config - def service(self, service_type: type[ServiceClass]) -> ServiceClass: + def service(self, service_type: ServiceClass) -> ServiceClass: """ Get a service cluster node of specified type. 
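The relaxed annotations above keep call sites unchanged; a short usage sketch of the accessors follows (the service type is real, the config key is invented for the example):

    # Illustrative usage; the config key is made up.
    storage_node = cluster_node.service(StorageNode)
    cluster_node.config(StorageNode).set({"logger.level": "debug"})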
diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 53098b1..5080d40 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -172,6 +172,15 @@ class ClusterStateController: if service_type == StorageNode: self.wait_after_storage_startup() + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Send sighup to all {service_type} services") + def sighup_services_of_type(self, service_type: type[ServiceClass]): + services = self.cluster.services(service_type) + parallel([service.send_signal_to_service for service in services], signal="SIGHUP") + + if service_type == StorageNode: + self.wait_after_storage_startup() + @wait_for_success(600, 60) def wait_s3gate(self, s3gate: S3Gate): with reporter.step(f"Wait for {s3gate} reconnection"): @@ -206,21 +215,27 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Stop {service_type} service on {node}") - def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True): + def stop_service_of_type(self, node: ClusterNode, service_type: ServiceClass, mask: bool = True): service = node.service(service_type) service.stop_service(mask) self.stopped_services.add(service) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Send sighup to {service_type} service on {node}") + def sighup_service_of_type(self, node: ClusterNode, service_type: ServiceClass): + service = node.service(service_type) + service.send_signal_to_service("SIGHUP") + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Start {service_type} service on {node}") - def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]): + def start_service_of_type(self, node: ClusterNode, service_type: ServiceClass): service = node.service(service_type) service.start_service() self.stopped_services.discard(service) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Start all stopped {service_type} services") - def start_stopped_services_of_type(self, service_type: type[ServiceClass]): + def start_stopped_services_of_type(self, service_type: ServiceClass): stopped_svc = self._get_stopped_by_type(service_type) if not stopped_svc: return diff --git a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py index 66f72d6..f0b2a21 100644 --- a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py +++ b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py @@ -14,14 +14,19 @@ class ConfigStateManager(StateManager): self.cluster = self.csc.cluster @reporter.step("Change configuration for {service_type} on all nodes") - def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any]): + def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any], sighup: bool = False): services = self.cluster.services(service_type) nodes = self.cluster.nodes(services) self.services_with_changed_config.update([(node, service_type) for node in nodes]) - self.csc.stop_services_of_type(service_type) + if not sighup: + self.csc.stop_services_of_type(service_type) + parallel([node.config(service_type).set for node in nodes], values=values) - 
self.csc.start_services_of_type(service_type) + if not sighup: + self.csc.start_services_of_type(service_type) + else: + self.csc.sighup_services_of_type(service_type) @reporter.step("Change configuration for {service_type} on {node}") def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]): @@ -32,18 +37,26 @@ class ConfigStateManager(StateManager): self.csc.start_service_of_type(node, service_type) @reporter.step("Revert all configuration changes") - def revert_all(self): + def revert_all(self, sighup: bool = False): if not self.services_with_changed_config: return - parallel(self._revert_svc, self.services_with_changed_config) + parallel(self._revert_svc, self.services_with_changed_config, sighup) self.services_with_changed_config.clear() - self.csc.start_all_stopped_services() + if not sighup: + self.csc.start_all_stopped_services() # TODO: parallel can't have multiple parallel_items :( @reporter.step("Revert all configuration {node_and_service}") - def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]): + def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass], sighup: bool = False): node, service_type = node_and_service - self.csc.stop_service_of_type(node, service_type) + service = node.service(service_type) + + if not sighup: + self.csc.stop_service_of_type(node, service_type) + node.config(service_type).revert() + + if sighup: + service.send_signal_to_service("SIGHUP") diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 8291345..180877d 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -65,6 +65,10 @@ class NodeBase(HumanReadableABC): with reporter.step(f"Start {self.name} service on {self.host.config.address}"): self.host.start_service(self.name) + def send_signal_to_service(self, signal: str): + with reporter.step(f"Send -{signal} signal to {self.name} service on {self.host.config.address}"): + self.host.send_signal_to_service(self.name, signal) + @abstractmethod def service_healthcheck(self) -> bool: """Service healthcheck.""" @@ -185,9 +189,7 @@ class NodeBase(HumanReadableABC): if attribute_name not in config.attributes: if default_attribute_name is None: - raise RuntimeError( - f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either" - ) + raise RuntimeError(f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either") return config.attributes[default_attribute_name] @@ -197,9 +199,7 @@ class NodeBase(HumanReadableABC): return self.host.get_service_config(self.name) def get_service_uptime(self, service: str) -> datetime: - result = self.host.get_shell().exec( - f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2" - ) + result = self.host.get_shell().exec(f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2") start_time = parser.parse(result.stdout.strip()) current_time = datetime.now(tz=timezone.utc) active_time = current_time - start_time From f24bfc06fd04f0fc195135315d1d3a9c9828fcf8 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 13 Nov 2024 17:46:03 +0300 Subject: [PATCH 222/274] [#319] Add cached fixture feature Signed-off-by: a.berezin --- src/frostfs_testlib/resources/optionals.py | 11 +++--- src/frostfs_testlib/testing/test_control.py | 39 +++++++++++++++++++++ 2 files changed, 44 insertions(+), 6 deletions(-) 
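The decorator introduced in the diffs below is meant to wrap expensive session-scoped fixtures. A hedged usage sketch per its docstring follows; the fixture name and body are invented, and the cached result must be YAML-serializable:

    # Illustrative usage; the fixture itself is made up.
    @pytest.fixture(scope="session")
    @cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
    def default_wallet() -> dict:
        return {"path": "/tmp/wallet.json", "password": ""}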
diff --git a/src/frostfs_testlib/resources/optionals.py b/src/frostfs_testlib/resources/optionals.py
index 2a7ff22..6caf158 100644
--- a/src/frostfs_testlib/resources/optionals.py
+++ b/src/frostfs_testlib/resources/optionals.py
@@ -16,11 +16,10 @@ OPTIONAL_NODE_UNDER_LOAD = os.getenv("OPTIONAL_NODE_UNDER_LOAD")
 OPTIONAL_FAILOVER_ENABLED = str_to_bool(os.getenv("OPTIONAL_FAILOVER_ENABLED", "true"))
 
 # Set this to True to disable background load. I.E. node which supposed to be stopped will not be actually stopped.
-OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(
-    os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true")
-)
+OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true"))
 
 # Set this to False for disable autouse fixture like node healthcheck during developing time.
-OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(
-    os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true")
-)
+OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true"))
+
+# Use cache for fixtures with @cached_fixture decorator
+OPTIONAL_CACHE_FIXTURES = str_to_bool(os.getenv("OPTIONAL_CACHE_FIXTURES", "false"))
diff --git a/src/frostfs_testlib/testing/test_control.py b/src/frostfs_testlib/testing/test_control.py
index 4fa6390..bc38208 100644
--- a/src/frostfs_testlib/testing/test_control.py
+++ b/src/frostfs_testlib/testing/test_control.py
@@ -1,13 +1,16 @@
 import inspect
 import logging
+import os
 from functools import wraps
 from time import sleep, time
 from typing import Any
 
+import yaml
 from _pytest.outcomes import Failed
 from pytest import fail
 
 from frostfs_testlib import reporter
+from frostfs_testlib.resources.common import ASSETS_DIR
 from frostfs_testlib.utils.func_utils import format_by_args
 
 logger = logging.getLogger("NeoLogger")
@@ -128,6 +131,42 @@ def run_optionally(enabled: bool, mock_value: Any = True):
     return deco
 
 
+def cached_fixture(enabled: bool):
+    """
+    Decorator to cache fixtures.
+    MUST be placed after @pytest.fixture and before @allure decorators.
+
+    Args:
+        enabled: if true, decorated func will be cached.
+ """ + + def deco(func): + @wraps(func) + def func_impl(*a, **kw): + # TODO: *a and *kw should be parsed to some kind of hashsum and used in filename to prevent cache load from different parameters + cache_file = os.path.join(ASSETS_DIR, f"fixture_cache_{func.__name__}.yml") + + if enabled and os.path.exists(cache_file): + with open(cache_file, "r") as cache_input: + return yaml.load(cache_input, Loader=yaml.Loader) + + result = func(*a, **kw) + + if enabled: + with open(cache_file, "w") as cache_output: + yaml.dump(result, cache_output) + return result + + # TODO: cache yielding fixtures + @wraps(func) + def gen_impl(*a, **kw): + raise NotImplementedError("Not implemented for yielding fixtures") + + return gen_impl if inspect.isgeneratorfunction(func) else func_impl + + return deco + + def wait_for_success( max_wait_time: int = 60, interval: int = 1, From 451de5e07e7ef6dd68e684aaa431839583a82089 Mon Sep 17 00:00:00 2001 From: anurindm Date: Thu, 14 Nov 2024 16:22:06 +0300 Subject: [PATCH 223/274] [#320] Added shards detach function Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/cli/frostfs_cli/shards.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py index 82ea87b..68a2f54 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -241,3 +241,21 @@ class FrostfsCliShards(CliCommand): "control shards evacuation status", **{param: value for param, value in locals().items() if param not in ["self"]}, ) + + def detach(self, endpoint: str, address: Optional[str] = None, id: Optional[str] = None, timeout: Optional[str] = None): + """ + Detach and close the shards + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + id: List of shard IDs in base58 encoding + timeout: Timeout for an operation (default 15s) + + Returns: + Command's result. 
+ """ + return self._execute( + "control shards detach", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) From a1953684b87f8c3d96f95a14ce98f59fdcab657b Mon Sep 17 00:00:00 2001 From: Roman Chernykh Date: Wed, 16 Oct 2024 18:42:42 +0300 Subject: [PATCH 224/274] [#307] added methods for testing MFA --- src/frostfs_testlib/s3/aws_cli_client.py | 87 ++++++++++++++++++++++++ src/frostfs_testlib/s3/boto3_client.py | 73 ++++++++++++++++++++ src/frostfs_testlib/s3/interfaces.py | 29 ++++++++ src/frostfs_testlib/utils/file_utils.py | 8 ++- 4 files changed, 195 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index ff4e329..ba95733 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -1440,3 +1440,90 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response + + # MFA METHODS + @reporter.step("Creates a new virtual MFA device") + def iam_create_virtual_mfa_device(self, virtual_mfa_device_name: str, outfile: str, bootstrap_method: str) -> tuple: + cmd = f"aws {self.common_flags} iam create-virtual-mfa-device --virtual-mfa-device-name {virtual_mfa_device_name}\ + --outfile {outfile} --bootstrap-method {bootstrap_method} --endpoint {self.iam_endpoint}" + + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber") + assert serial_number, f"Expected SerialNumber in response:\n{response}" + + return serial_number, False + + @reporter.step("Deactivates the specified MFA device and removes it from association with the user name") + def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: + cmd = f"aws {self.common_flags} iam deactivate-mfa-device --user-name {user_name} --serial-number {serial_number} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Deletes a virtual MFA device") + def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: + cmd = f"aws {self.common_flags} iam delete-virtual-mfa-device --serial-number {serial_number} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Enables the specified MFA device and associates it with the specified IAM user") + def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: + cmd = f"aws {self.common_flags} iam enable-mfa-device --user-name {user_name} --serial-number {serial_number} --authentication-code1 {authentication_code1}\ + --authentication-code2 {authentication_code2} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Lists the MFA devices for an IAM user") + def iam_list_virtual_mfa_devices(self) -> dict: + cmd = f"aws {self.common_flags} iam list-virtual-mfa-devices --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = 
self._to_json(output)
+        assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}"
+
+        return response
+
+    @reporter.step("Get session token for user")
+    def sts_get_session_token(
+        self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None
+    ) -> tuple:
+        cmd = f"aws {self.common_flags} sts get-session-token --endpoint {self.iam_endpoint}"
+        if duration_seconds:
+            cmd += f" --duration-seconds {duration_seconds}"
+        if serial_number:
+            cmd += f" --serial-number {serial_number}"
+        if token_code:
+            cmd += f" --token-code {token_code}"
+        if self.profile:
+            cmd += f" --profile {self.profile}"
+
+        output = self.local_shell.exec(cmd).stdout
+        response = self._to_json(output)
+        access_key = response.get("Credentials", {}).get("AccessKeyId")
+        secret_access_key = response.get("Credentials", {}).get("SecretAccessKey")
+        session_token = response.get("Credentials", {}).get("SessionToken")
+        assert access_key, f"Expected AccessKeyId in response:\n{response}"
+        assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}"
+        assert session_token, f"Expected SessionToken in response:\n{response}"
+
+        return access_key, secret_access_key, session_token
diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py
index 91d8c5a..12113ad 100644
--- a/src/frostfs_testlib/s3/boto3_client.py
+++ b/src/frostfs_testlib/s3/boto3_client.py
@@ -41,6 +41,8 @@ class Boto3ClientWrapper(S3ClientWrapper):
         self.boto3_iam_client: S3Client = None
         self.iam_endpoint: str = ""
 
+        self.boto3_sts_client: S3Client = None
+
         self.access_key_id: str = access_key_id
         self.secret_access_key: str = secret_access_key
         self.profile = profile
@@ -87,6 +89,14 @@ class Boto3ClientWrapper(S3ClientWrapper):
             endpoint_url=self.iam_endpoint,
             verify=False,
         )
+        # since the STS does not have an endpoint, IAM is used
+        self.boto3_sts_client = self.session.client(
+            service_name="sts",
+            aws_access_key_id=self.access_key_id,
+            aws_secret_access_key=self.secret_access_key,
+            endpoint_url=iam_endpoint,
+            verify=False,
+        )
 
     def _to_s3_param(self, param: str) -> str:
         replacement_map = {
@@ -1265,3 +1275,66 @@ class Boto3ClientWrapper(S3ClientWrapper):
             endpoint=self.iam_endpoint,
             profile=self.profile,
         )
+
+    # MFA methods
+    @reporter.step("Creates a new virtual MFA device")
+    def iam_create_virtual_mfa_device(
+        self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None
+    ) -> tuple:
+        response = self.boto3_iam_client.create_virtual_mfa_device(VirtualMFADeviceName=virtual_mfa_device_name)
+
+        serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber")
+        base32StringSeed = response.get("VirtualMFADevice", {}).get("Base32StringSeed")
+        assert serial_number, f"Expected SerialNumber in response:\n{response}"
+        assert base32StringSeed, f"Expected Base32StringSeed in response:\n{response}"
+
+        return serial_number, base32StringSeed
+
+    @reporter.step("Deactivates the specified MFA device and removes it from association with the user name")
+    def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict:
+        response = self.boto3_iam_client.deactivate_mfa_device(UserName=user_name, SerialNumber=serial_number)
+
+        return response
+
+    @reporter.step("Deletes a virtual MFA device")
+    def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict:
+        response = self.boto3_iam_client.delete_virtual_mfa_device(SerialNumber=serial_number)
+
+        return
response + + @reporter.step("Enables the specified MFA device and associates it with the specified IAM user") + def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: + response = self.boto3_iam_client.enable_mfa_device( + UserName=user_name, + SerialNumber=serial_number, + AuthenticationCode1=authentication_code1, + AuthenticationCode2=authentication_code2, + ) + + return response + + @reporter.step("Lists the MFA devices for an IAM user") + def iam_list_virtual_mfa_devices(self) -> dict: + response = self.boto3_iam_client.list_virtual_mfa_devices() + assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}" + + return response + + @reporter.step("Get session token for user") + def sts_get_session_token( + self, duration_seconds: Optional[str] = "", serial_number: Optional[str] = "", token_code: Optional[str] = "" + ) -> tuple: + response = self.boto3_sts_client.get_session_token( + DurationSeconds=duration_seconds, + SerialNumber=serial_number, + TokenCode=token_code, + ) + + access_key = response.get("Credentials", {}).get("AccessKeyId") + secret_access_key = response.get("Credentials", {}).get("SecretAccessKey") + session_token = response.get("Credentials", {}).get("SessionToken") + assert access_key, f"Expected AccessKeyId in response:\n{response}" + assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" + assert session_token, f"Expected SessionToken in response:\n{response}" + + return access_key, secret_access_key, session_token diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index c084484..69a5154 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -578,3 +578,32 @@ class S3ClientWrapper(HumanReadableABC): @abstractmethod def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: """Removes the specified tags from the user""" + + # MFA methods + @abstractmethod + def iam_create_virtual_mfa_device( + self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None + ) -> tuple: + """Creates a new virtual MFA device""" + + @abstractmethod + def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: + """Deactivates the specified MFA device and removes it from association with the user name""" + + @abstractmethod + def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: + """Deletes a virtual MFA device""" + + @abstractmethod + def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: + """Enables the specified MFA device and associates it with the specified IAM user""" + + @abstractmethod + def iam_list_virtual_mfa_devices(self) -> dict: + """Lists the MFA devices for an IAM user""" + + @abstractmethod + def sts_get_session_token( + self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None + ) -> tuple: + """Get session token for user""" diff --git a/src/frostfs_testlib/utils/file_utils.py b/src/frostfs_testlib/utils/file_utils.py index c2b497f..8839d7f 100644 --- a/src/frostfs_testlib/utils/file_utils.py +++ b/src/frostfs_testlib/utils/file_utils.py @@ -45,7 +45,7 @@ def ensure_directory_opener(path, flags): # TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps # Use object_size dt in future as argument 
@reporter.step("Generate file") -def generate_file(size: int) -> TestFile: +def generate_file(size: int, file_name: Optional[str] = None) -> TestFile: """Generates a binary file with the specified size in bytes. Args: @@ -54,7 +54,11 @@ def generate_file(size: int) -> TestFile: Returns: The path to the generated file. """ - test_file = TestFile(os.path.join(ASSETS_DIR, string_utils.unique_name("object-"))) + + if file_name is None: + file_name = string_utils.unique_name("object-") + + test_file = TestFile(os.path.join(ASSETS_DIR, file_name)) with open(test_file, "wb", opener=ensure_directory_opener) as file: file.write(os.urandom(size)) logger.info(f"File with size {size} bytes has been generated: {test_file}") From 8eaa511e5c39feaad06f7c3bf795639fcbbaac92 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 18 Nov 2024 16:57:14 +0300 Subject: [PATCH 225/274] [#322] Added classmethod decorator in Http client Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/http/http_client.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py index 261b2a6..3106273 100644 --- a/src/frostfs_testlib/http/http_client.py +++ b/src/frostfs_testlib/http/http_client.py @@ -50,7 +50,8 @@ class HttpClient: return response - def _attach_response(self, response: httpx.Response): + @classmethod + def _attach_response(cls, response: httpx.Response): request = response.request try: @@ -83,12 +84,13 @@ class HttpClient: f"Response Headers: {response_headers}\n\n" f"Response Body: {response.text}\n\n" ) - curl_request = self._create_curl_request(request.url, request.method, request.headers, request_body) + curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body) reporter.attach(report, "Requests Info") reporter.attach(curl_request, "CURL") - def _create_curl_request(self, url: str, method: str, headers: httpx.Headers, data: str) -> str: + @classmethod + def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str) -> str: headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items()) data = f" -d '{data}'" if data else "" # Option -k means no verify SSL From 0c9660fffc43b6cbeecf119a4e1cb3008020c042 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 20 Nov 2024 17:14:33 +0300 Subject: [PATCH 226/274] [#323] Update APE related entities Signed-off-by: a.berezin --- src/frostfs_testlib/resources/error_patterns.py | 8 ++++++-- src/frostfs_testlib/storage/dataclasses/ape.py | 14 +++++++++++++- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 3ba5f13..9b5e8e4 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -27,6 +27,10 @@ S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs" S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema." RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied" -RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied" +# Errors from node missing reasons if request was forwarded. 
Commenting for now
+# RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied"
+RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request"
 NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound"
-NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound"
+# Errors from node missing reasons if request was forwarded. Commenting for now
+# NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound"
+NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request"
diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py
index f0f1758..ef2e1f2 100644
--- a/src/frostfs_testlib/storage/dataclasses/ape.py
+++ b/src/frostfs_testlib/storage/dataclasses/ape.py
@@ -26,6 +26,18 @@ class ObjectOperations(HumanReadableEnum):
         return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL]
 
 
+class ContainerOperations(HumanReadableEnum):
+    PUT = "container.put"
+    GET = "container.get"
+    LIST = "container.list"
+    DELETE = "container.delete"
+    WILDCARD_ALL = "container.*"
+
+    @staticmethod
+    def get_all():
+        return [op for op in ContainerOperations if op != ContainerOperations.WILDCARD_ALL]
+
+
 @dataclass
 class Operations:
     GET_CONTAINER = "GetContainer"
@@ -124,7 +136,7 @@ class Rule:
 
         if not operations:
             self.operations = []
-        elif isinstance(operations, ObjectOperations):
+        elif isinstance(operations, (ObjectOperations, ContainerOperations)):
             self.operations = [operations]
         else:
             self.operations = operations
From 24e1dfef282b46e40c900711563e3f69b24220cb Mon Sep 17 00:00:00 2001
From: Roman Chernykh
Date: Mon, 18 Nov 2024 13:01:26 +0300
Subject: [PATCH 227/274] [#324] Extend list_objects method

---
 src/frostfs_testlib/s3/aws_cli_client.py | 13 +++++++++++--
 src/frostfs_testlib/s3/boto3_client.py   | 11 +++++++++--
 src/frostfs_testlib/s3/interfaces.py     |  4 +++-
 3 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py
index ba95733..2ac6d68 100644
--- a/src/frostfs_testlib/s3/aws_cli_client.py
+++ b/src/frostfs_testlib/s3/aws_cli_client.py
@@ -196,11 +196,20 @@ class AwsCliClient(S3ClientWrapper):
         return response.get("LocationConstraint")
 
     @reporter.step("List objects S3")
-    def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
+    def list_objects(
+        self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None
+    ) -> Union[dict, list[str]]:
         if bucket.startswith("-") or " " in bucket:
             bucket = f'"{bucket}"'
 
-        cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
+        cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} "
+        if page_size:
+            cmd = cmd.replace("--no-paginate", "")
+            cmd += f" --page-size {page_size} "
+        if prefix:
+            cmd += f" --prefix {prefix}"
+        if self.profile:
+            cmd += f" --profile {self.profile} "
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py
index 12113ad..e7f2c35 100644
--- a/src/frostfs_testlib/s3/boto3_client.py
+++ b/src/frostfs_testlib/s3/boto3_client.py
@@ 
-398,10 +398,17 @@ class Boto3ClientWrapper(S3ClientWrapper): return response if full_output else obj_list @reporter.step("List objects S3") - def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + def list_objects( + self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None + ) -> Union[dict, list[str]]: + params = {"Bucket": bucket} + if page_size: + params["MaxKeys"] = page_size + if prefix: + params["Prefix"] = prefix response = self._exec_request( self.boto3_client.list_objects, - params={"Bucket": bucket}, + params, endpoint=self.s3gate_endpoint, profile=self.profile, ) diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index 69a5154..c3d99eb 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -195,7 +195,9 @@ class S3ClientWrapper(HumanReadableABC): """ @abstractmethod - def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + def list_objects( + self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None + ) -> Union[dict, list[str]]: """Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application From 3dc7a5bdb095dbf02c9942f6844540efcccf1b88 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 28 Nov 2024 16:43:46 +0300 Subject: [PATCH 228/274] [#328] Change logic activating split-brain Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/network.py | 18 ++++---- .../controllers/cluster_state_controller.py | 41 +++++++++++++------ 2 files changed, 38 insertions(+), 21 deletions(-) diff --git a/src/frostfs_testlib/steps/network.py b/src/frostfs_testlib/steps/network.py index efaaf5a..6bde2f1 100644 --- a/src/frostfs_testlib/steps/network.py +++ b/src/frostfs_testlib/steps/network.py @@ -4,16 +4,18 @@ from frostfs_testlib.storage.cluster import ClusterNode class IpHelper: @staticmethod - def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None: + def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[tuple]) -> None: shell = node.host.get_shell() - for ip in block_ip: - shell.exec(f"ip route add blackhole {ip}") + for ip, table in block_ip: + if not table: + shell.exec(f"ip r a blackhole {ip}") + continue + shell.exec(f"ip r a blackhole {ip} table {table}") @staticmethod def restore_input_traffic_to_node(node: ClusterNode) -> None: shell = node.host.get_shell() - unlock_ip = shell.exec("ip route list | grep blackhole", CommandOptions(check=False)) - if unlock_ip.return_code != 0: - return - for ip in unlock_ip.stdout.strip().split("\n"): - shell.exec(f"ip route del blackhole {ip.split(' ')[1]}") + unlock_ip = shell.exec("ip r l table all | grep blackhole", CommandOptions(check=False)).stdout + + for active_blackhole in unlock_ip.strip().split("\n"): + shell.exec(f"ip r d {active_blackhole}") diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 5080d40..67e4d60 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,4 +1,5 @@ import datetime +import itertools import logging 
import time from typing import TypeVar @@ -39,7 +40,7 @@ class ClusterStateController: def __init__(self, shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> None: self.stopped_nodes: list[ClusterNode] = [] self.detached_disks: dict[str, DiskController] = {} - self.dropped_traffic: list[ClusterNode] = [] + self.dropped_traffic: set[ClusterNode] = set() self.excluded_from_netmap: list[StorageNode] = [] self.stopped_services: set[NodeBase] = set() self.cluster = cluster @@ -325,22 +326,22 @@ class ClusterStateController: @reporter.step("Drop traffic to {node}, nodes - {block_nodes}") def drop_traffic(self, node: ClusterNode, wakeup_timeout: int, name_interface: str, block_nodes: list[ClusterNode] = None) -> None: - list_ip = self._parse_interfaces(block_nodes, name_interface) - IpHelper.drop_input_traffic_to_node(node, list_ip) + interfaces_tables = self._parse_interfaces(block_nodes, name_interface) + IpHelper.drop_input_traffic_to_node(node, interfaces_tables) time.sleep(wakeup_timeout) - self.dropped_traffic.append(node) + self.dropped_traffic.add(node) @reporter.step("Start traffic to {node}") def restore_traffic(self, node: ClusterNode) -> None: IpHelper.restore_input_traffic_to_node(node=node) - index = self.dropped_traffic.index(node) - self.dropped_traffic.pop(index) + self.dropped_traffic.discard(node) @reporter.step("Restore blocked nodes") def restore_all_traffic(self): if not self.dropped_traffic: return parallel(self._restore_traffic_to_node, self.dropped_traffic) + self.dropped_traffic.clear() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Hard reboot host {node} via magic SysRq option") @@ -516,17 +517,31 @@ class ClusterStateController: return disk_controller + @reporter.step("Restore traffic {node}") def _restore_traffic_to_node(self, node): IpHelper.restore_input_traffic_to_node(node) - def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str): - interfaces = [] + def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str) -> list[tuple]: + interfaces_and_tables = set() for node in nodes: - dict_interfaces = node.host.config.interfaces - for type, ip in dict_interfaces.items(): - if name_interface in type: - interfaces.append(ip) - return interfaces + shell = node.host.get_shell() + lines = shell.exec(f"ip r l table all | grep '{name_interface}'").stdout.splitlines() + + ips = [] + tables = [] + + for line in lines: + if "src" not in line or "table local" in line: + continue + parts = line.split() + ips.append(parts[-1]) + if "table" in line: + tables.append(parts[parts.index("table") + 1]) + tables.append(None) + + [interfaces_and_tables.add((ip, table)) for ip, table in itertools.product(ips, tables)] + + return interfaces_and_tables @reporter.step("Ping node") def _ping_host(self, node: ClusterNode): From 7d6768c83ff9d8169f1f73b01ae51b639db6c1cd Mon Sep 17 00:00:00 2001 From: anurindm Date: Thu, 28 Nov 2024 17:10:43 +0300 Subject: [PATCH 229/274] [#325] Added get nns records method to frostfs-adm Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 23 ++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index 5e39cf4..bdf4a91 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -463,3 +463,26 @@ class FrostfsAdmMorph(CliCommand): "morph ape rm-rule-chain", **{param: value for param, value in locals().items() if param not 
in ["self"]}, ) + + def get_nns_records( + self, + name: str, + type: Optional[str] = None, + rpc_endpoint: Optional[str] = None, + alphabet_wallets: Optional[str] = None, + ) -> CommandResult: + """Returns domain record of the specified type + + Args: + name: Domain name + type: Domain name service record type(A|CNAME|SOA|TXT) + rpc_endpoint: N3 RPC node endpoint + alphabet_wallets: path to alphabet wallets dir + + Returns: + Command's result + """ + return self._execute( + "morph nns get-records", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) From 0e040d2722526c3a7ea092f6167b5324a87170f0 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Mon, 2 Dec 2024 14:18:17 +0300 Subject: [PATCH 230/274] [#330] Improve CURL generation and fix Boto3 logging Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/http/http_client.py | 100 +++++++++++++++++------- src/frostfs_testlib/utils/cli_utils.py | 3 + 2 files changed, 76 insertions(+), 27 deletions(-) diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py index 3106273..0d1e0bd 100644 --- a/src/frostfs_testlib/http/http_client.py +++ b/src/frostfs_testlib/http/http_client.py @@ -1,6 +1,8 @@ +import io import json import logging import logging.config +from typing import IO import httpx @@ -40,7 +42,7 @@ class HttpClient: client = httpx.Client(timeout=timeout, transport=transport) response = client.request(method, url, **kwargs) - self._attach_response(response) + self._attach_response(response, **kwargs) logger.info(f"Response: {response.status_code} => {response.text}") if expected_status_code: @@ -51,47 +53,91 @@ class HttpClient: return response @classmethod - def _attach_response(cls, response: httpx.Response): - request = response.request - + def _parse_body(cls, readable: httpx.Request | httpx.Response) -> str | None: try: - request_headers = json.dumps(dict(request.headers), indent=4) - except json.JSONDecodeError: - request_headers = str(request.headers) - - try: - request_body = request.read() - try: - request_body = request_body.decode("utf-8") - except UnicodeDecodeError as e: - request_body = f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}" + content = readable.read() except Exception as e: - request_body = f"Error reading request body: {str(e)}" + logger.warning(f"Unable to read file: {str(e)}") + return None - request_body = "" if request_body is None else request_body + if not content: + return None + + request_body = None try: - response_headers = json.dumps(dict(response.headers), indent=4) - except json.JSONDecodeError: - response_headers = str(response.headers) + request_body = json.loads(content) + except (json.JSONDecodeError, UnicodeDecodeError) as e: + logger.warning(f"Unable to convert body to json: {str(e)}") + + if request_body is not None: + return json.dumps(request_body, default=str, indent=4) + + try: + request_body = content.decode() + except UnicodeDecodeError as e: + logger.warning(f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}") + + request_body = content if request_body is None else request_body + request_body = "" if len(request_body) > 1000 else request_body + + return request_body + + @classmethod + def _parse_files(cls, files: dict | None) -> str | None: + if not files: + return None + + filepaths = {} + + for name, file in files.items(): + if isinstance(file, io.IOBase): + filepaths[name] = file.name + + if isinstance(file, tuple): + filepaths[name] = file[1].name + + return 
json.dumps(filepaths, default=str, indent=4) + + @classmethod + def _attach_response(cls, response: httpx.Response, **kwargs): + request = response.request + request_headers = json.dumps(dict(request.headers), default=str, indent=4) + request_body = cls._parse_body(request) + + files = kwargs.get("files") + request_files = cls._parse_files(files) + + response_headers = json.dumps(dict(response.headers), default=str, indent=4) + response_body = cls._parse_body(response) report = ( f"Method: {request.method}\n\n" - f"URL: {request.url}\n\n" - f"Request Headers: {request_headers}\n\n" - f"Request Body: {request_body}\n\n" - f"Response Status Code: {response.status_code}\n\n" - f"Response Headers: {response_headers}\n\n" - f"Response Body: {response.text}\n\n" + + f"URL: {request.url}\n\n" + + f"Request Headers: {request_headers}\n\n" + + (f"Request Body: {request_body}\n\n" if request_body else "") + + (f"Request Files: {request_files}\n\n" if request_files else "") + + f"Response Status Code: {response.status_code}\n\n" + + f"Response Headers: {response_headers}\n\n" + + (f"Response Body: {response_body}\n\n" if response_body else "") ) - curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body) + curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body, files) reporter.attach(report, "Requests Info") reporter.attach(curl_request, "CURL") @classmethod - def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str) -> str: + def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict = None) -> str: headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items()) data = f" -d '{data}'" if data else "" + + if files: + for name, file in files.items(): + if isinstance(file, io.IOBase): + data += f' -F "{name}=@{file.name}"' + + if isinstance(file, tuple): + data += f' -F "{name}=@{file[1].name}"' + # Option -k means no verify SSL return f"curl {url} -X {method} {headers}{data} -k" diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 32e4346..0f9fef2 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -80,6 +80,9 @@ def log_command_execution(cmd: str, output: Union[str, dict], params: Optional[d if not params: params = {} + if params.get("Body") and len(params.get("Body")) > 1000: + params["Body"] = "" + output_params = params try: From 8ec7e21e8450167d02875b3255ab9140f60facb2 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Tue, 3 Dec 2024 14:55:12 +0300 Subject: [PATCH 231/274] [#331] Fix type hints for service methods Signed-off-by: a.berezin --- src/frostfs_testlib/storage/cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 3ec4922..b67e34d 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -91,10 +91,10 @@ class ClusterNode: config_str = yaml.dump(new_config) shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}") - def config(self, service_type: ServiceClass) -> ServiceConfigurationYml: + def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml: return self.service(service_type).config - def service(self, service_type: ServiceClass) -> ServiceClass: + def service(self, service_type: type[ServiceClass]) -> ServiceClass: """ Get a service 
cluster node of specified type. From b3d05c5c28ab6727e3e56bdba7de05e8ed9fb6b1 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 15 Nov 2024 21:01:34 +0300 Subject: [PATCH 232/274] [#326] Automation of PATCH method in GRPC Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/cli/frostfs_cli/object.py | 47 +++++++++++ src/frostfs_testlib/storage/constants.py | 2 + .../storage/dataclasses/ape.py | 1 + .../grpc_operations/implementations/object.py | 83 +++++++++++++++++++ .../storage/grpc_operations/interfaces.py | 32 +++++++ 5 files changed, 165 insertions(+) diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 1857987..0c00563 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -276,6 +276,53 @@ class FrostfsCliObject(CliCommand): **{param: value for param, value in locals().items() if param not in ["self"]}, ) + def patch( + self, + rpc_endpoint: str, + cid: str, + oid: str, + range: list[str] = None, + payload: list[str] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + trace: bool = False, + ttl: Optional[int] = None, + wallet: Optional[str] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + PATCH an object. + + Args: + rpc_endpoint: Remote node address (as 'multiaddr' or ':') + cid: Container ID + oid: Object ID + range: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2] + payload: An array of file paths to be applied in each range + new_attrs: Attributes to be changed in the format Key1=Value1,Key2=Value2 + replace_attrs: Replace all attributes completely with new ones specified in new_attrs + address: Address of wallet account + bearer: File with signed JSON or binary encoded bearer token + generate_key: Generate new private key + session: Filepath to a JSON- or binary-encoded token of the object RANGE session + timeout: Timeout for the operation + trace: Generate trace ID and print it + ttl: TTL value in request meta header (default 2) + wallet: WIF (NEP-2) string or path to the wallet or binary key + xhdr: Dict with request X-Headers + Returns: + (str): ID of patched Object + """ + return self._execute( + "object patch", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + def range( self, rpc_endpoint: str, diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 2cffd3a..39c6b66 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -23,4 +23,6 @@ class PlacementRule: DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" + REP_1_FOR_2_NODES_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 2 FROM * AS X" DEFAULT_EC_PLACEMENT_RULE = "EC 3.1" + EC_1_1_FOR_2_NODES_PLACEMENT_RULE = "EC 1.1 IN X CBF 1 SELECT 2 FROM * AS X" diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py index ef2e1f2..b7b5dfc 100644 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -13,6 +13,7 @@ FROSTFS_CONTRACT_CACHE_TIMEOUT = 30 class 
ObjectOperations(HumanReadableEnum): PUT = "object.put" + PATCH = "object.patch" GET = "object.get" HEAD = "object.head" GET_RANGE = "object.range" diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py index 0e14aec..f31f223 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py @@ -206,6 +206,11 @@ class ObjectOperations(interfaces.ObjectInterface): hash_type=hash_type, timeout=timeout, ) + + if range: + # Cut off the range and return only hash + return result.stdout.split(":")[1].strip() + return result.stdout @reporter.step("Head object") @@ -407,6 +412,57 @@ class ObjectOperations(interfaces.ObjectInterface): oid = id_str.split(":")[1] return oid.strip() + @reporter.step("Patch object") + def patch( + self, + cid: str, + oid: str, + endpoint: str, + ranges: list[str] = None, + payloads: list[str] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + trace: bool = False, + ) -> str: + """ + PATCH an object. + + Args: + cid: ID of Container where we get the Object from + oid: Object ID + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + ranges: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2] + payloads: An array of file paths to be applied in each range + new_attrs: Attributes to be changed in the format "key1=value1,key2=value2" + replace_attrs: Replace all attributes completely with new ones specified in new_attrs + bearer: Path to Bearer Token file, appends to `--bearer` key + xhdr: Request X-Headers in form of Key=Value + session: Path to a JSON-encoded container session token + timeout: Timeout for the operation + trace: Generate trace ID and print it + Returns: + (str): ID of patched Object + """ + result = self.cli.object.patch( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + range=ranges, + payload=payloads, + new_attrs=new_attrs, + replace_attrs=replace_attrs, + bearer=bearer, + xhdr=xhdr, + session=session, + timeout=timeout, + trace=trace, + ) + return result.stdout.split(":")[1].strip() + @reporter.step("Put object to random node") def put_to_random_node( self, @@ -622,3 +678,30 @@ class ObjectOperations(interfaces.ObjectInterface): ] return object_nodes + + @reporter.step("Search parts of object") + def parts( + self, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> list[str]: + endpoint = alive_node.storage_node.get_rpc_endpoint() + response = self.cli.object.nodes( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + ttl=1 if is_direct else None, + json=True, + xhdr=xhdr, + timeout=timeout, + verify_presence_all=verify_presence_all, + ) + response_json = json.loads(response.stdout) + return [data_object["object_id"] for data_object in response_json["data_objects"]] diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces.py b/src/frostfs_testlib/storage/grpc_operations/interfaces.py index c293c2d..07fe52f 100644 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces.py +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces.py @@ -198,6 
+198,24 @@ class ObjectInterface(ABC): ) -> str: pass + @abstractmethod + def patch( + self, + cid: str, + oid: str, + endpoint: str, + ranges: Optional[list[str]] = None, + payloads: Optional[list[str]] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + bearer: Optional[str] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + trace: bool = False, + ) -> str: + pass + @abstractmethod def put_to_random_node( self, @@ -264,6 +282,20 @@ class ObjectInterface(ABC): ) -> List[ClusterNode]: pass + @abstractmethod + def parts( + self, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = None, + ) -> List[str]: + pass + class ContainerInterface(ABC): @abstractmethod From 61353cb38c723a3d3513de96a4ae7f142ed3c637 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Thu, 5 Dec 2024 14:17:25 +0300 Subject: [PATCH 233/274] [#332] Fix `files` param in http client Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/http/http_client.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py index 0d1e0bd..6008989 100644 --- a/src/frostfs_testlib/http/http_client.py +++ b/src/frostfs_testlib/http/http_client.py @@ -2,7 +2,7 @@ import io import json import logging import logging.config -from typing import IO +from typing import Mapping, Sequence import httpx @@ -84,13 +84,20 @@ class HttpClient: return request_body @classmethod - def _parse_files(cls, files: dict | None) -> str | None: + def _parse_files(cls, files: Mapping | Sequence | None) -> str | None: if not files: return None filepaths = {} - for name, file in files.items(): + if isinstance(files, Sequence): + items = files + elif isinstance(files, Mapping): + items = files.items() + else: + raise TypeError(f"'files' must be either Sequence or Mapping, got: {type(files).__name__}") + + for name, file in items: if isinstance(file, io.IOBase): filepaths[name] = file.name From ee7d9df4a9eddf7da21b66a2070227c0aaa71ad2 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Thu, 5 Dec 2024 16:34:36 +0300 Subject: [PATCH 234/274] [#333] Fix `files` param in http client part two Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/http/http_client.py | 26 ++++++++++--------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py index 6008989..a3e3e54 100644 --- a/src/frostfs_testlib/http/http_client.py +++ b/src/frostfs_testlib/http/http_client.py @@ -84,12 +84,12 @@ class HttpClient: return request_body @classmethod - def _parse_files(cls, files: Mapping | Sequence | None) -> str | None: - if not files: - return None - + def _parse_files(cls, files: Mapping | Sequence | None) -> dict: filepaths = {} + if not files: + return filepaths + if isinstance(files, Sequence): items = files elif isinstance(files, Mapping): @@ -100,11 +100,10 @@ class HttpClient: for name, file in items: if isinstance(file, io.IOBase): filepaths[name] = file.name - - if isinstance(file, tuple): + elif isinstance(file, Sequence): filepaths[name] = file[1].name - return json.dumps(filepaths, default=str, indent=4) + return filepaths @classmethod def _attach_response(cls, response: httpx.Response, **kwargs): @@ -128,23 +127,18 @@ class HttpClient: + 
f"Response Headers: {response_headers}\n\n" + (f"Response Body: {response_body}\n\n" if response_body else "") ) - curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body, files) + curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body, request_files) reporter.attach(report, "Requests Info") reporter.attach(curl_request, "CURL") @classmethod - def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict = None) -> str: + def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str: headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items()) data = f" -d '{data}'" if data else "" - if files: - for name, file in files.items(): - if isinstance(file, io.IOBase): - data += f' -F "{name}=@{file.name}"' - - if isinstance(file, tuple): - data += f' -F "{name}=@{file[1].name}"' + for name, path in files.items(): + data += f' -F "{name}=@{path}"' # Option -k means no verify SSL return f"curl {url} -X {method} {headers}{data} -k" From 0ebb8453290b26f85bce7091dd6ea307df5f0d9a Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Fri, 6 Dec 2024 10:50:34 +0300 Subject: [PATCH 235/274] [#335] Fixed iam boto3 client --- src/frostfs_testlib/s3/boto3_client.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index e7f2c35..c680f17 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -86,6 +86,7 @@ class Boto3ClientWrapper(S3ClientWrapper): service_name="iam", aws_access_key_id=self.access_key_id, aws_secret_access_key=self.secret_access_key, + region_name=self.region, endpoint_url=self.iam_endpoint, verify=False, ) From 8ff1e72499f49054b7cf0d8fd05f87b040e5d32f Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Fri, 13 Dec 2024 10:45:14 +0300 Subject: [PATCH 236/274] [#337] Add rule chain error Signed-off-by: Ekaterina Chernitsyna --- src/frostfs_testlib/resources/error_patterns.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 9b5e8e4..4c22648 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -9,6 +9,7 @@ OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed" SESSION_NOT_FOUND = "code = 4096.*message = session token not found" OUT_OF_RANGE = "code = 2053.*message = out of range" EXPIRED_SESSION_TOKEN = "code = 4097.*message = expired session token" +ADD_CHAIN_ERROR = "code = 5120 message = apemanager access denied" # TODO: Change to codes with message # OBJECT_IS_LOCKED = "code = 2050.*message = object is locked" # LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." 
will be available once 2092 is fixed From cd15be3b7c41448280217aac741f2fc1efefac95 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 15 Nov 2024 21:03:21 +0300 Subject: [PATCH 237/274] [#334] Automation of PATCH method in S3 Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/http/http_client.py | 6 +- src/frostfs_testlib/s3/aws_cli_client.py | 7 +- src/frostfs_testlib/s3/boto3_client.py | 7 +- src/frostfs_testlib/s3/interfaces.py | 4 +- src/frostfs_testlib/s3/s3_http_client.py | 127 ++++++++++++++++++++++ src/frostfs_testlib/steps/s3/s3_helper.py | 24 ++++ 6 files changed, 162 insertions(+), 13 deletions(-) create mode 100644 src/frostfs_testlib/s3/s3_http_client.py diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py index a3e3e54..c3e5fae 100644 --- a/src/frostfs_testlib/http/http_client.py +++ b/src/frostfs_testlib/http/http_client.py @@ -46,9 +46,9 @@ class HttpClient: logger.info(f"Response: {response.status_code} => {response.text}") if expected_status_code: - assert response.status_code == expected_status_code, ( - f"Got {response.status_code} response code" f" while {expected_status_code} expected" - ) + assert ( + response.status_code == expected_status_code + ), f"Got {response.status_code} response code while {expected_status_code} expected" return response diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 2ac6d68..4196c77 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -171,7 +171,7 @@ class AwsCliClient(S3ClientWrapper): return response.get("TagSet") @reporter.step("Get bucket acl") - def get_bucket_acl(self, bucket: str) -> list: + def get_bucket_acl(self, bucket: str) -> dict: if bucket.startswith("-") or " " in bucket: bucket = f'"{bucket}"' @@ -179,8 +179,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Grants") + return self._to_json(output) @reporter.step("Get bucket location") def get_bucket_location(self, bucket: str) -> dict: @@ -861,7 +860,7 @@ class AwsCliClient(S3ClientWrapper): return response["Parts"] @reporter.step("Complete multipart upload S3") - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: if bucket.startswith("-") or " " in bucket: bucket = f'"{bucket}"' diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index c680f17..6b6c74e 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -230,14 +230,13 @@ class Boto3ClientWrapper(S3ClientWrapper): return response.get("TagSet") @reporter.step("Get bucket acl") - def get_bucket_acl(self, bucket: str) -> list: - response = self._exec_request( + def get_bucket_acl(self, bucket: str) -> dict: + return self._exec_request( self.boto3_client.get_bucket_acl, params={"Bucket": bucket}, endpoint=self.s3gate_endpoint, profile=self.profile, ) - return response.get("Grants") @reporter.step("Delete bucket tagging") def delete_bucket_tagging(self, bucket: str) -> None: @@ -705,7 +704,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response["Parts"] @reporter.step("Complete multipart upload S3") - def 
complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] params = self._convert_to_s3_params(locals(), exclude=["parts"]) params["MultipartUpload"] = {"Parts": parts} diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index c3d99eb..7ce9f31 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -128,7 +128,7 @@ class S3ClientWrapper(HumanReadableABC): """Deletes the tags from the bucket.""" @abstractmethod - def get_bucket_acl(self, bucket: str) -> list: + def get_bucket_acl(self, bucket: str) -> dict: """This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket.""" @abstractmethod @@ -336,7 +336,7 @@ class S3ClientWrapper(HumanReadableABC): """Lists the parts that have been uploaded for a specific multipart upload.""" @abstractmethod - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: """Completes a multipart upload by assembling previously uploaded parts.""" @abstractmethod diff --git a/src/frostfs_testlib/s3/s3_http_client.py b/src/frostfs_testlib/s3/s3_http_client.py new file mode 100644 index 0000000..a34c380 --- /dev/null +++ b/src/frostfs_testlib/s3/s3_http_client.py @@ -0,0 +1,127 @@ +import hashlib +import logging +import xml.etree.ElementTree as ET + +import httpx +from botocore.auth import SigV4Auth +from botocore.awsrequest import AWSRequest +from botocore.credentials import Credentials + +from frostfs_testlib import reporter +from frostfs_testlib.http.http_client import HttpClient +from frostfs_testlib.utils.file_utils import TestFile + +logger = logging.getLogger("NeoLogger") + +DEFAULT_TIMEOUT = 60.0 + + +class S3HttpClient: + def __init__( + self, s3gate_endpoint: str, access_key_id: str, secret_access_key: str, profile: str = "default", region: str = "us-east-1" + ) -> None: + self.http_client = HttpClient() + self.s3gate_endpoint = s3gate_endpoint + self.credentials = Credentials(access_key_id, secret_access_key) + self.profile = profile + self.region = region + self.service = "s3" + self.signature = SigV4Auth(self.credentials, self.service, self.region) + + def _to_s3_header(self, header: str) -> dict: + replacement_map = { + "Acl": "ACL", + "_": "-", + } + + result = header + if not header.startswith("x_amz"): + result = header.title() + + for find, replace in replacement_map.items(): + result = result.replace(find, replace) + + return result + + def _convert_to_s3_headers(self, scope: dict, exclude: list[str] = None): + exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"] + return {self._to_s3_header(header): value for header, value in scope.items() if header not in exclude and value is not None} + + def _create_aws_request( + self, method: str, url: str, headers: dict, content: str | bytes | TestFile = None, params: dict = None + ) -> AWSRequest: + data = b"" + + if content is not None: + if isinstance(content, TestFile): + with open(content, "rb") as io_content: + data = io_content.read() + elif isinstance(content, str): + data = bytes(content, encoding="utf-8") + elif isinstance(content, bytes): + data = content + else: + raise TypeError(f"Content expected as a 
string, bytes or TestFile object, got: {content}")
+
+        headers["X-Amz-Content-SHA256"] = hashlib.sha256(data).hexdigest()
+        aws_request = AWSRequest(method, url, headers, data, params)
+        self.signature.add_auth(aws_request)
+
+        return aws_request
+
+    def _exec_request(
+        self,
+        method: str,
+        url: str,
+        headers: dict,
+        content: str | bytes | TestFile = None,
+        params: dict = None,
+        timeout: float = DEFAULT_TIMEOUT,
+    ) -> dict:
+        aws_request = self._create_aws_request(method, url, headers, content, params)
+        response = self.http_client.send(
+            aws_request.method,
+            aws_request.url,
+            headers=dict(aws_request.headers),
+            data=aws_request.data,
+            params=aws_request.params,
+            timeout=timeout,
+        )
+
+        try:
+            response.raise_for_status()
+        except httpx.HTTPStatusError:
+            raise httpx.HTTPStatusError(response.text, request=response.request, response=response)
+
+        root = ET.fromstring(response.read())
+        data = {
+            "LastModified": root.find(".//LastModified").text,
+            "ETag": root.find(".//ETag").text,
+        }
+
+        if response.headers.get("x-amz-version-id"):
+            data["VersionId"] = response.headers.get("x-amz-version-id")
+
+        return data
+
+    @reporter.step("Patch object S3")
+    def patch_object(
+        self,
+        bucket: str,
+        key: str,
+        content: str | bytes | TestFile,
+        content_range: str,
+        version_id: str = None,
+        if_match: str = None,
+        if_unmodified_since: str = None,
+        x_amz_expected_bucket_owner: str = None,
+        timeout: float = DEFAULT_TIMEOUT,
+    ) -> dict:
+        if content_range and not content_range.startswith("bytes"):
+            content_range = f"bytes {content_range}/*"
+
+        url = f"{self.s3gate_endpoint}/{bucket}/{key}"
+        headers = self._convert_to_s3_headers(locals(), exclude=["bucket", "key", "content", "version_id", "timeout"])
+        params = {"VersionId": version_id} if version_id is not None else None
+
+        return self._exec_request("PATCH", url, headers, content, params, timeout=timeout)
diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py
index dbf48d3..7949f2d 100644
--- a/src/frostfs_testlib/steps/s3/s3_helper.py
+++ b/src/frostfs_testlib/steps/s3/s3_helper.py
@@ -12,6 +12,7 @@ from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.container import search_nodes_with_container
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.utils.file_utils import TestFile, get_file_hash
 
 logger = logging.getLogger("NeoLogger")
 
@@ -185,3 +186,26 @@ def search_nodes_with_bucket(
             break
     nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster)
     return nodes_list
+
+
+def get_bytes_relative_to_object(value: int | str, object_size: int = None, part_size: int = None) -> int:
+    if isinstance(value, int):
+        return value
+
+    if "part" not in value and "object" not in value:
+        return int(value)
+
+    if object_size is not None:
+        value = value.replace("object", str(object_size))
+
+    if part_size is not None:
+        value = value.replace("part", str(part_size))
+
+    return int(eval(value))
+
+
+def get_range_relative_to_object(rng: str, object_size: int = None, part_size: int = None, int_values: bool = False) -> str | tuple[int, int]:
+    start, end = rng.split(":")
+    start = get_bytes_relative_to_object(start, object_size, part_size)
+    end = get_bytes_relative_to_object(end, object_size, part_size)
+    return (start, end) if int_values else f"bytes {start}-{end}/*"
From cc7bd4ffc9dd59115144bdd4cf81ff07ffe8b372 Mon Sep 17 00:00:00 2001
From: 
Dmitriy Zayakin Date: Tue, 17 Dec 2024 13:55:15 +0300 Subject: [PATCH 238/274] [#339] Added ns args for func container create Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/cli/container.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index 809b39a..db896ce 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -111,6 +111,8 @@ def create_container( options: Optional[dict] = None, await_mode: bool = True, wait_for_creation: bool = True, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> str: """ @@ -143,6 +145,8 @@ def create_container( result = cli.container.create( rpc_endpoint=endpoint, policy=rule, + nns_name=nns_name, + nns_zone=nns_zone, basic_acl=basic_acl, attributes=attributes, name=name, From 335eed85b152e2e8ac147bc95cc2af88beaad7ff Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Mon, 16 Dec 2024 22:06:00 +0300 Subject: [PATCH 239/274] [#338] Added parameter word_count to method get_logs Signed-off-by: Ilyas Niyazov --- src/frostfs_testlib/hosting/docker_host.py | 1 + src/frostfs_testlib/hosting/interfaces.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 01dc6b5..d458b0a 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -250,6 +250,7 @@ class DockerHost(Host): unit: Optional[str] = None, exclude_filter: Optional[str] = None, priority: Optional[str] = None, + word_count: bool = None, ) -> str: client = self._get_docker_client() filtered_logs = "" diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 6d1e5da..f58d856 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -324,6 +324,7 @@ class Host(ABC): unit: Optional[str] = None, exclude_filter: Optional[str] = None, priority: Optional[str] = None, + word_count: bool = None, ) -> str: """Get logs from host filtered by regex. @@ -334,6 +335,7 @@ class Host(ABC): unit: required unit. priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher. For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0. + word_count: output type, expected values: lines, bytes, json Returns: Found entries as str if any found. 
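
A minimal usage sketch for the NNS arguments introduced in [PATCH 238/274] above. It is not part of the patch series: the wallet, shell and cluster objects are assumed to come from the usual testlib fixtures, and the NNS name and zone values are placeholders, not values from any commit.

from frostfs_testlib.steps.cli.container import create_container

# Hypothetical fixtures: default_wallet (WalletInfo), client_shell (Shell), cluster (Cluster).
cid = create_container(
    default_wallet,
    shell=client_shell,
    endpoint=cluster.default_rpc_endpoint,
    rule="REP 2 IN X CBF 1 SELECT 4 FROM * AS X",
    nns_name="my-container",       # placeholder NNS domain name
    nns_zone="container.frostfs",  # placeholder NNS zone
)

Both new parameters default to None, so existing callers that do not pass them keep their current behavior.
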
From dc5a9e7bb9336a9b331c119a09615e68f4703d01 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Tue, 17 Dec 2024 18:16:54 +0300 Subject: [PATCH 240/274] [#340] Move s3 and http directories to avoid conflict with requests Signed-off-by: Kirill Sosnovskikh --- pyproject.toml | 2 +- src/frostfs_testlib/clients/__init__.py | 5 +++++ src/frostfs_testlib/{ => clients}/http/__init__.py | 0 src/frostfs_testlib/{ => clients}/http/http_client.py | 0 src/frostfs_testlib/clients/s3/__init__.py | 1 + src/frostfs_testlib/{ => clients}/s3/aws_cli_client.py | 2 +- src/frostfs_testlib/{ => clients}/s3/boto3_client.py | 2 +- src/frostfs_testlib/{ => clients}/s3/curl_bucket_resolver.py | 2 +- src/frostfs_testlib/{ => clients}/s3/interfaces.py | 0 src/frostfs_testlib/{ => clients}/s3/s3_http_client.py | 2 +- src/frostfs_testlib/s3/__init__.py | 3 --- src/frostfs_testlib/steps/cli/container.py | 2 -- src/frostfs_testlib/steps/http/__init__.py | 0 src/frostfs_testlib/steps/{http => }/http_gate.py | 2 +- src/frostfs_testlib/steps/{s3 => }/s3_helper.py | 4 +--- .../storage/grpc_operations/implementations/container.py | 2 +- tests/test_dataclasses.py | 2 +- 17 files changed, 15 insertions(+), 16 deletions(-) create mode 100644 src/frostfs_testlib/clients/__init__.py rename src/frostfs_testlib/{ => clients}/http/__init__.py (100%) rename src/frostfs_testlib/{ => clients}/http/http_client.py (100%) create mode 100644 src/frostfs_testlib/clients/s3/__init__.py rename src/frostfs_testlib/{ => clients}/s3/aws_cli_client.py (99%) rename src/frostfs_testlib/{ => clients}/s3/boto3_client.py (99%) rename src/frostfs_testlib/{ => clients}/s3/curl_bucket_resolver.py (88%) rename src/frostfs_testlib/{ => clients}/s3/interfaces.py (100%) rename src/frostfs_testlib/{ => clients}/s3/s3_http_client.py (98%) delete mode 100644 src/frostfs_testlib/s3/__init__.py delete mode 100644 src/frostfs_testlib/steps/http/__init__.py rename src/frostfs_testlib/steps/{http => }/http_gate.py (99%) rename src/frostfs_testlib/steps/{s3 => }/s3_helper.py (97%) diff --git a/pyproject.toml b/pyproject.toml index 3faa637..2778f8a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -62,7 +62,7 @@ authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3Credentia wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider" [project.entry-points."frostfs.testlib.bucket_cid_resolver"] -frostfs = "frostfs_testlib.s3.curl_bucket_resolver:CurlBucketContainerResolver" +frostfs = "frostfs_testlib.clients.s3.curl_bucket_resolver:CurlBucketContainerResolver" [tool.isort] profile = "black" diff --git a/src/frostfs_testlib/clients/__init__.py b/src/frostfs_testlib/clients/__init__.py new file mode 100644 index 0000000..e46766b --- /dev/null +++ b/src/frostfs_testlib/clients/__init__.py @@ -0,0 +1,5 @@ +from frostfs_testlib.clients.http.http_client import HttpClient +from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient +from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper +from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper +from frostfs_testlib.clients.s3.s3_http_client import S3HttpClient diff --git a/src/frostfs_testlib/http/__init__.py b/src/frostfs_testlib/clients/http/__init__.py similarity index 100% rename from src/frostfs_testlib/http/__init__.py rename to src/frostfs_testlib/clients/http/__init__.py diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/clients/http/http_client.py similarity index 100% rename from 
src/frostfs_testlib/http/http_client.py rename to src/frostfs_testlib/clients/http/http_client.py diff --git a/src/frostfs_testlib/clients/s3/__init__.py b/src/frostfs_testlib/clients/s3/__init__.py new file mode 100644 index 0000000..65a3990 --- /dev/null +++ b/src/frostfs_testlib/clients/s3/__init__.py @@ -0,0 +1 @@ +from frostfs_testlib.clients.s3.interfaces import BucketContainerResolver, S3ClientWrapper, VersioningStatus diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py similarity index 99% rename from src/frostfs_testlib/s3/aws_cli_client.py rename to src/frostfs_testlib/clients/s3/aws_cli_client.py index 4196c77..3496b2b 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/clients/s3/aws_cli_client.py @@ -6,8 +6,8 @@ from time import sleep from typing import Literal, Optional, Union from frostfs_testlib import reporter +from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME -from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.shell import CommandOptions from frostfs_testlib.shell.local_shell import LocalShell from frostfs_testlib.utils import string_utils diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py similarity index 99% rename from src/frostfs_testlib/s3/boto3_client.py rename to src/frostfs_testlib/clients/s3/boto3_client.py index 6b6c74e..53e7ffa 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -13,8 +13,8 @@ from botocore.exceptions import ClientError from mypy_boto3_s3 import S3Client from frostfs_testlib import reporter +from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME -from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.utils import string_utils # TODO: Refactor this code to use shell instead of _cmd_run diff --git a/src/frostfs_testlib/s3/curl_bucket_resolver.py b/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py similarity index 88% rename from src/frostfs_testlib/s3/curl_bucket_resolver.py rename to src/frostfs_testlib/clients/s3/curl_bucket_resolver.py index b713e79..4d845cf 100644 --- a/src/frostfs_testlib/s3/curl_bucket_resolver.py +++ b/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py @@ -1,7 +1,7 @@ import re from frostfs_testlib.cli.generic_cli import GenericCli -from frostfs_testlib.s3.interfaces import BucketContainerResolver +from frostfs_testlib.clients.s3 import BucketContainerResolver from frostfs_testlib.storage.cluster import ClusterNode diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/clients/s3/interfaces.py similarity index 100% rename from src/frostfs_testlib/s3/interfaces.py rename to src/frostfs_testlib/clients/s3/interfaces.py diff --git a/src/frostfs_testlib/s3/s3_http_client.py b/src/frostfs_testlib/clients/s3/s3_http_client.py similarity index 98% rename from src/frostfs_testlib/s3/s3_http_client.py rename to src/frostfs_testlib/clients/s3/s3_http_client.py index a34c380..b83e7a8 100644 --- a/src/frostfs_testlib/s3/s3_http_client.py +++ b/src/frostfs_testlib/clients/s3/s3_http_client.py @@ 
-8,7 +8,7 @@ from botocore.awsrequest import AWSRequest from botocore.credentials import Credentials from frostfs_testlib import reporter -from frostfs_testlib.http.http_client import HttpClient +from frostfs_testlib.clients import HttpClient from frostfs_testlib.utils.file_utils import TestFile logger = logging.getLogger("NeoLogger") diff --git a/src/frostfs_testlib/s3/__init__.py b/src/frostfs_testlib/s3/__init__.py deleted file mode 100644 index 32426c2..0000000 --- a/src/frostfs_testlib/s3/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from frostfs_testlib.s3.aws_cli_client import AwsCliClient -from frostfs_testlib.s3.boto3_client import Boto3ClientWrapper -from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index db896ce..092b1a3 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -7,9 +7,7 @@ from typing import Optional, Union from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.plugins import load_plugin from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC -from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node from frostfs_testlib.storage.cluster import Cluster, ClusterNode diff --git a/src/frostfs_testlib/steps/http/__init__.py b/src/frostfs_testlib/steps/http/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http_gate.py similarity index 99% rename from src/frostfs_testlib/steps/http/http_gate.py rename to src/frostfs_testlib/steps/http_gate.py index 117cded..4e712c1 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http_gate.py @@ -12,8 +12,8 @@ import requests from frostfs_testlib import reporter from frostfs_testlib.cli import GenericCli +from frostfs_testlib.clients.s3.aws_cli_client import command_options from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE -from frostfs_testlib.s3.aws_cli_client import command_options from frostfs_testlib.shell import Shell from frostfs_testlib.shell.local_shell import LocalShell from frostfs_testlib.steps.cli.object import get_object diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3_helper.py similarity index 97% rename from src/frostfs_testlib/steps/s3/s3_helper.py rename to src/frostfs_testlib/steps/s3_helper.py index 7949f2d..c3092df 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3_helper.py @@ -6,13 +6,11 @@ from typing import Optional from dateutil.parser import parse from frostfs_testlib import reporter -from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus -from frostfs_testlib.s3.interfaces import BucketContainerResolver +from frostfs_testlib.clients.s3 import BucketContainerResolver, S3ClientWrapper, VersioningStatus from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.container import search_nodes_with_container from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.utils.file_utils import TestFile, get_file_hash logger = logging.getLogger("NeoLogger") diff --git 
a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py index 7a637d7..86cac26 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py @@ -5,9 +5,9 @@ from typing import List, Optional, Union from frostfs_testlib import reporter from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.clients.s3 import BucketContainerResolver from frostfs_testlib.plugins import load_plugin from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.grpc_operations import interfaces from frostfs_testlib.utils import json_utils diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py index 19f3832..677aed4 100644 --- a/tests/test_dataclasses.py +++ b/tests/test_dataclasses.py @@ -2,7 +2,7 @@ from typing import Any import pytest -from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper +from frostfs_testlib.clients import AwsCliClient, Boto3ClientWrapper from frostfs_testlib.storage.dataclasses.acl import EACLRole from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.object_size import ObjectSize From 0479701258ba115fce9ee3e91783b112b473a4ca Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Wed, 18 Dec 2024 17:35:14 +0300 Subject: [PATCH 241/274] [#341] Add test for multipart object in Test_http_object testsuite Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/steps/http_gate.py | 48 ++++++++++---------------- 1 file changed, 19 insertions(+), 29 deletions(-) diff --git a/src/frostfs_testlib/steps/http_gate.py b/src/frostfs_testlib/steps/http_gate.py index 4e712c1..51b0301 100644 --- a/src/frostfs_testlib/steps/http_gate.py +++ b/src/frostfs_testlib/steps/http_gate.py @@ -38,34 +38,34 @@ def get_via_http_gate( """ This function gets given object from HTTP gate cid: container id to get object from - oid: object ID + oid: object id / object key node: node to make request request_path: (optional) http request, if ommited - use default [{endpoint}/get/{cid}/{oid}] """ - # if `request_path` parameter omitted, use default - if request_path is None: - request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" - else: + request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" + if request_path: request = f"{node.http_gate.get_endpoint()}{request_path}" - resp = requests.get(request, stream=True, timeout=timeout, verify=False) + response = requests.get(request, stream=True, timeout=timeout, verify=False) - if not resp.ok: + if not response.ok: raise Exception( f"""Failed to get object via HTTP gate: - request: {resp.request.path_url}, - response: {resp.text}, - headers: {resp.headers}, - status code: {resp.status_code} {resp.reason}""" + request: {response.request.path_url}, + response: {response.text}, + headers: {response.headers}, + status code: {response.status_code} {response.reason}""" ) logger.info(f"Request: {request}") - _attach_allure_step(request, resp.status_code) + _attach_allure_step(request, response.status_code) test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")) with open(test_file, "wb") as file: - shutil.copyfileobj(resp.raw, file) + for chunk in 
response.iter_content(chunk_size=8192): + file.write(chunk) + return test_file @@ -117,12 +117,12 @@ def get_via_http_gate_by_attribute( endpoint: http gate endpoint request_path: (optional) http request path, if ommited - use default [{endpoint}/get_by_attribute/{Key}/{Value}] """ + attr_name = list(attribute.keys())[0] attr_value = quote_plus(str(attribute.get(attr_name))) - # if `request_path` parameter ommited, use default - if request_path is None: - request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" - else: + + request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" + if request_path: request = f"{node.http_gate.get_endpoint()}{request_path}" resp = requests.get(request, stream=True, timeout=timeout, verify=False) @@ -357,19 +357,9 @@ def try_to_get_object_via_passed_request_and_expect_error( ) -> None: try: if attrs is None: - get_via_http_gate( - cid=cid, - oid=oid, - node=node, - request_path=http_request_path, - ) + get_via_http_gate(cid, oid, node, http_request_path) else: - get_via_http_gate_by_attribute( - cid=cid, - attribute=attrs, - node=node, - request_path=http_request_path, - ) + get_via_http_gate_by_attribute(cid, attrs, node, http_request_path) raise AssertionError(f"Expected error on getting object with cid: {cid}") except Exception as err: match = error_pattern.casefold() in str(err).casefold() From 6e951443edbb822e5cc7ac5a4b32b341cb114634 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Tue, 24 Dec 2024 11:16:38 +0300 Subject: [PATCH 242/274] [#342] Remove try-catch from delete block Signed-off-by: a.berezin --- .../implementations/container.py | 25 ++++++++----------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py index 86cac26..75af00c 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py @@ -181,20 +181,17 @@ class ContainerOperations(interfaces.ContainerInterface): force: bool = False, trace: bool = False, ): - try: - return self.cli.container.delete( - rpc_endpoint=endpoint, - cid=cid, - address=address, - await_mode=await_mode, - session=session, - ttl=ttl, - xhdr=xhdr, - force=force, - trace=trace, - ).stdout - except RuntimeError as e: - print(f"Error request:\n{e}") + return self.cli.container.delete( + rpc_endpoint=endpoint, + cid=cid, + address=address, + await_mode=await_mode, + session=session, + ttl=ttl, + xhdr=xhdr, + force=force, + trace=trace, + ).stdout @reporter.step("Get container") def get( From 9e3380d519be5f59279e5530b1e0a84a89286bb8 Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Tue, 10 Dec 2024 15:42:13 +0300 Subject: [PATCH 243/274] [#336] Refine CODEOWNERS settings Signed-off-by: Vitaliy Potyarkin --- CODEOWNERS | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CODEOWNERS b/CODEOWNERS index 4a621d3..519ca42 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1 +1,3 @@ -* @JuliaKovshova @abereziny @d.zayakin @anikeev-yadro @anurindm @ylukoyan @i.niyazov +.* @TrueCloudLab/qa-committers +.forgejo/.* @potyarkin +Makefile @potyarkin From 0a3de927a2cf2c89c7d29f633083ef079f773cbc Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Tue, 10 Dec 2024 11:47:25 +0300 Subject: [PATCH 244/274] [#343] Extend testsuites for PATCH method Signed-off-by: 
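
Besides the multipart test itself, PATCH 241 above swaps `shutil.copyfileobj(resp.raw, file)` for iteration over `response.iter_content(...)`. The distinction matters: `resp.raw` is the undecoded urllib3 stream, so a gzip- or deflate-encoded gate response would be written to disk still compressed, while `iter_content` yields decoded chunks and keeps memory usage flat on large objects. A condensed, self-contained version of the resulting download pattern (gate URL layout taken from the docstring in the patch):

import requests

def download_object(endpoint: str, cid: str, oid: str, out_path: str) -> str:
    # Default gate path, mirroring {endpoint}/get/{cid}/{oid}
    response = requests.get(f"{endpoint}/get/{cid}/{oid}", stream=True, timeout=30, verify=False)
    response.raise_for_status()
    with open(out_path, "wb") as file:
        # Decoded, bounded-memory copy of the body to disk
        for chunk in response.iter_content(chunk_size=8192):
            file.write(chunk)
    return out_path
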
Kirill Sosnovskikh --- src/frostfs_testlib/cli/frostfs_cli/object.py | 3 +- .../clients/s3/s3_http_client.py | 28 +++++++++++++++++-- .../storage/dataclasses/ape.py | 1 + 3 files changed, 28 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 0c00563..e536544 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -315,8 +315,9 @@ class FrostfsCliObject(CliCommand): ttl: TTL value in request meta header (default 2) wallet: WIF (NEP-2) string or path to the wallet or binary key xhdr: Dict with request X-Headers + Returns: - (str): ID of patched Object + Command's result. """ return self._execute( "object patch", diff --git a/src/frostfs_testlib/clients/s3/s3_http_client.py b/src/frostfs_testlib/clients/s3/s3_http_client.py index b83e7a8..f6f423d 100644 --- a/src/frostfs_testlib/clients/s3/s3_http_client.py +++ b/src/frostfs_testlib/clients/s3/s3_http_client.py @@ -21,12 +21,16 @@ class S3HttpClient: self, s3gate_endpoint: str, access_key_id: str, secret_access_key: str, profile: str = "default", region: str = "us-east-1" ) -> None: self.http_client = HttpClient() - self.s3gate_endpoint = s3gate_endpoint self.credentials = Credentials(access_key_id, secret_access_key) self.profile = profile self.region = region - self.service = "s3" - self.signature = SigV4Auth(self.credentials, self.service, self.region) + + self.iam_endpoint: str = None + self.s3gate_endpoint: str = None + self.service: str = None + self.signature: SigV4Auth = None + + self.set_endpoint(s3gate_endpoint) def _to_s3_header(self, header: str) -> dict: replacement_map = { @@ -104,6 +108,24 @@ class S3HttpClient: return data + @reporter.step("Set endpoint S3 to {s3gate_endpoint}") + def set_endpoint(self, s3gate_endpoint: str): + if self.s3gate_endpoint == s3gate_endpoint: + return + + self.s3gate_endpoint = s3gate_endpoint + self.service = "s3" + self.signature = SigV4Auth(self.credentials, self.service, self.region) + + @reporter.step("Set endpoint IAM to {iam_endpoint}") + def set_iam_endpoint(self, iam_endpoint: str): + if self.iam_endpoint == iam_endpoint: + return + + self.iam_endpoint = iam_endpoint + self.service = "iam" + self.signature = SigV4Auth(self.credentials, self.service, self.region) + @reporter.step("Patch object S3") def patch_object( self, diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py index b7b5dfc..1199435 100644 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -52,6 +52,7 @@ class Operations: SEARCH_OBJECT = "SearchObject" HEAD_OBJECT = "HeadObject" PUT_OBJECT = "PutObject" + PATCH_OBJECT = "PatchObject" class Verb(HumanReadableEnum): From 6fe7fef44b100b976c5a72aad76477a277975b05 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 25 Dec 2024 19:25:14 +0300 Subject: [PATCH 245/274] [#344] Update ifaces Signed-off-by: a.berezin --- src/frostfs_testlib/cli/netmap_parser.py | 4 ++-- src/frostfs_testlib/steps/cli/object.py | 6 +++++- .../storage/controllers/cluster_state_controller.py | 8 +++++--- .../storage/grpc_operations/implementations/chunks.py | 6 +++--- .../storage/grpc_operations/implementations/object.py | 3 ++- 5 files changed, 17 insertions(+), 10 deletions(-) diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py index 23ac4da..db6f55f 100644 --- 
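
The `set_endpoint`/`set_iam_endpoint` pair added in PATCH 244 above re-creates the signer on every switch because a Signature V4 signer is bound to one service: the credential scope it stamps into the Authorization header names either `s3` or `iam`, and a signature produced for one service is rejected by the other. A small illustration with the same botocore types (dummy keys, hypothetical host):

from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest
from botocore.credentials import Credentials

credentials = Credentials("EXAMPLE-KEY-ID", "EXAMPLE-SECRET")
# One signer per service: the SigV4 credential scope is
# <date>/<region>/<service>/aws4_request, so "s3" and "iam" need separate signers.
s3_signer = SigV4Auth(credentials, "s3", "us-east-1")
iam_signer = SigV4Auth(credentials, "iam", "us-east-1")

request = AWSRequest(method="GET", url="https://iam.example.local/")
iam_signer.add_auth(request)  # stamps Authorization with the "iam" scope
print(request.headers["Authorization"][:32])
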
a/src/frostfs_testlib/cli/netmap_parser.py +++ b/src/frostfs_testlib/cli/netmap_parser.py @@ -1,7 +1,7 @@ import re from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo, NodeStatus +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeNetInfo, NodeNetmapInfo, NodeStatus class NetmapParser: @@ -85,7 +85,7 @@ class NetmapParser: @staticmethod def snapshot_one_node(output: str, cluster_node: ClusterNode) -> NodeNetmapInfo | None: snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output) - snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.host_ip] + snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.get_interface(Interfaces.MGMT)] if not snapshot_node: return None return snapshot_node[0] diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index f28de06..7f8391d 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -12,6 +12,7 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell import Shell from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import wait_for_success from frostfs_testlib.utils import json_utils @@ -752,7 +753,10 @@ def get_object_nodes( ] object_nodes = [ - cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip + cluster_node + for netmap_node in netmap_nodes + for cluster_node in cluster.cluster_nodes + if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT) ] return object_nodes diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 67e4d60..3a10ded 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -19,7 +19,7 @@ from frostfs_testlib.steps.node_management import include_node_to_network_map, r from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeStatus +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeStatus from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import parallel from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success @@ -454,9 +454,11 @@ class ClusterStateController: if not checker_node: checker_node = cluster_node netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(checker_node.storage_node.get_rpc_endpoint()).stdout) - netmap = [node for node in netmap if cluster_node.host_ip == node.node] + netmap = [node for node in netmap if cluster_node.get_interface(Interfaces.MGMT) == node.node] if status == NodeStatus.OFFLINE: - assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline" + assert ( 
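
Every hunk in PATCH 245 replaces a `cluster_node.host_ip` comparison with `cluster_node.get_interface(Interfaces.MGMT)`, presumably because on multi-interface hosts the address a storage node registers in the network map is its management interface rather than the host IP, so the old comparison could silently match nothing. A schematic reduction of the matching logic (all types here are simplified stand-ins for the testlib classes, not the real ones):

from dataclasses import dataclass

@dataclass
class NetmapEntry:  # stand-in for NodeNetmapInfo
    node: str       # address the storage node announced to the network map

@dataclass
class Node:  # stand-in for ClusterNode
    host_ip: str
    interfaces: dict[str, str]

    def get_interface(self, name: str) -> str:
        return self.interfaces[name]

def nodes_in_netmap(netmap: list[NetmapEntry], nodes: list[Node]) -> list[Node]:
    # Compare the announced address with the management interface, not host_ip:
    # on multi-homed hosts the two differ and host_ip would never match.
    return [n for entry in netmap for n in nodes if entry.node == n.get_interface("mgmt")]

node = Node(host_ip="10.0.0.5", interfaces={"mgmt": "192.168.1.5"})
print(nodes_in_netmap([NetmapEntry(node="192.168.1.5")], [node]))
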
+ cluster_node.get_interface(Interfaces.MGMT) not in netmap + ), f"{cluster_node.get_interface(Interfaces.MGMT)} not in Offline" else: assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'" diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py index 7f3161c..ad45855 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -6,7 +6,7 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher -from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo +from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, Interfaces, NodeNetmapInfo from frostfs_testlib.storage.grpc_operations import interfaces from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils.cli_utils import parse_netmap_output @@ -30,7 +30,7 @@ class ChunksOperations(interfaces.ChunksInterface): result = [] for node_info in netmap: for cluster_node in cluster.cluster_nodes: - if node_info.node == cluster_node.host_ip: + if node_info.node == cluster_node.get_interface(Interfaces.MGMT): result.append(cluster_node) return result @@ -40,7 +40,7 @@ class ChunksOperations(interfaces.ChunksInterface): for node_info in netmap: if node_info.node_id in chunk.confirmed_nodes: for cluster_node in cluster.cluster_nodes: - if cluster_node.host_ip == node_info.node: + if cluster_node.get_interface(Interfaces.MGMT) == node_info.node: return (cluster_node, node_info) @wait_for_success(300, 5, fail_testcase=None) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py index f31f223..be8a470 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py @@ -11,6 +11,7 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell.interfaces import CommandResult from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.grpc_operations import interfaces from frostfs_testlib.storage.grpc_operations.implementations.chunks import ChunksOperations from frostfs_testlib.testing.test_control import wait_for_success @@ -674,7 +675,7 @@ class ObjectOperations(interfaces.ObjectInterface): cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes - if netmap_node.node == cluster_node.host_ip + if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT) ] return object_nodes From 974836f1bd91a3fc567b7d64b853f051e53d7cec Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 13 Jan 2025 12:58:29 +0300 Subject: [PATCH 246/274] [#346] Added correct exception in Chunks parse Signed-off-by: Dmitriy Zayakin --- .../storage/grpc_operations/implementations/chunks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py 
b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py index ad45855..0d787e2 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -161,5 +161,5 @@ class ChunksOperations(interfaces.ChunksInterface): def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]: parse_result = json.loads(object_nodes) if parse_result.get("errors"): - raise parse_result["errors"] + raise RuntimeError(", ".join(parse_result["errors"])) return [Chunk(**chunk) for chunk in parse_result["data_objects"]] From 5a291c5b7f9374a7f9c8b479158024e73459616d Mon Sep 17 00:00:00 2001 From: "m.malygina" Date: Mon, 13 Jan 2025 16:32:47 +0300 Subject: [PATCH 247/274] [#347] remove stderr check Signed-off-by: m.malygina --- src/frostfs_testlib/processes/remote_process.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py index 5624940..071675a 100644 --- a/src/frostfs_testlib/processes/remote_process.py +++ b/src/frostfs_testlib/processes/remote_process.py @@ -193,7 +193,7 @@ class RemoteProcess: ) if "No such file or directory" in terminal.stderr: return None - elif terminal.stderr or terminal.return_code != 0: + elif terminal.return_code != 0: raise AssertionError(f"cat process {file} was not successful: {terminal.stderr}") return terminal.stdout From daf186690beff8d4f8bafbbdfa7aedd1c458317d Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 10 Jan 2025 14:29:03 +0300 Subject: [PATCH 248/274] [#345] Fix curl request generation Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/clients/http/http_client.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/clients/http/http_client.py b/src/frostfs_testlib/clients/http/http_client.py index c3e5fae..aebd5ef 100644 --- a/src/frostfs_testlib/clients/http/http_client.py +++ b/src/frostfs_testlib/clients/http/http_client.py @@ -134,9 +134,10 @@ class HttpClient: @classmethod def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str: - headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items()) - data = f" -d '{data}'" if data else "" + excluded_headers = {"Accept-Encoding", "Connection", "User-Agent", "Content-Length"} + headers = " ".join(f"-H '{header.title()}: {value}'" for header, value in headers.items() if header.title() not in excluded_headers) + data = f" -d '{data}'" if data else "" for name, path in files.items(): data += f' -F "{name}=@{path}"' From 80dd8d0b169dbbbd875c03b753f119ad2fce382a Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Wed, 15 Jan 2025 16:31:54 +0300 Subject: [PATCH 249/274] [#348] Fixed check of fields in S3 aws/boto3 methods related to policies Signed-off-by: y.lukoyanova --- src/frostfs_testlib/clients/s3/aws_cli_client.py | 12 ++++++------ src/frostfs_testlib/clients/s3/boto3_client.py | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py index 3496b2b..accc289 100644 --- a/src/frostfs_testlib/clients/s3/aws_cli_client.py +++ b/src/frostfs_testlib/clients/s3/aws_cli_client.py @@ -1227,7 +1227,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert 
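
The chunks-parser fix in PATCH 246 above is worth spelling out: `raise` accepts only `BaseException` subclasses or instances, so `raise parse_result["errors"]` (a list) failed with its own `TypeError: exceptions must derive from BaseException` and hid the actual error text. A self-contained reduction of the corrected behaviour (the `Chunk` dataclass is omitted here):

import json

def parse_object_nodes(payload: str) -> list:
    result = json.loads(payload)
    if result.get("errors"):
        # `raise result["errors"]` would itself blow up with
        # "TypeError: exceptions must derive from BaseException"
        raise RuntimeError(", ".join(result["errors"]))
    return result["data_objects"]

try:
    parse_object_nodes('{"errors": ["object not found"]}')
except RuntimeError as err:
    print(err)  # object not found
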
response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" return response @@ -1239,7 +1239,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" return response @@ -1264,7 +1264,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" return response @@ -1276,7 +1276,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert response.get("Groups"), f"Expected Groups in response:\n{response}" + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" return response @@ -1288,7 +1288,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert response.get("Groups"), f"Expected Groups in response:\n{response}" + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" return response @@ -1324,7 +1324,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" return response diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index 53e7ffa..890b4e9 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -1091,7 +1091,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" return response @reporter.step("Lists all managed policies that are attached to the specified IAM user") @@ -1102,7 +1102,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" return response @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") @@ -1127,7 +1127,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" return response @reporter.step("Lists the IAM groups") @@ -1137,7 +1137,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert response.get("Groups"), f"Expected Groups in response:\n{response}" + assert "Groups" in response.keys(), f"Expected Groups in 
response:\n{response}" return response @reporter.step("Lists the IAM groups that the specified IAM user belongs to") @@ -1148,7 +1148,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert response.get("Groups"), f"Expected Groups in response:\n{response}" + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" return response @reporter.step("Lists all the managed policies that are available in your AWS account") @@ -1180,7 +1180,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" return response @reporter.step("Lists the IAM users") From aed20e02accb3656ebf2b480fa7b884de6768f7d Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Fri, 17 Jan 2025 17:37:51 +0300 Subject: [PATCH 250/274] [#349] Fixed hook pytest-collect-modifyitems Signed-off-by: Ilyas Niyazov --- src/frostfs_testlib/__init__.py | 2 +- src/frostfs_testlib/hooks.py | 19 +++++++++++++++++-- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index 1ceb972..e557a79 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1,4 +1,4 @@ __version__ = "2.0.1" from .fixtures import configure_testlib, hosting, temp_directory -from .hooks import pytest_collection_modifyitems +from .hooks import pytest_add_frostfs_marker, pytest_collection_modifyitems diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py index 6830e78..1ada660 100644 --- a/src/frostfs_testlib/hooks.py +++ b/src/frostfs_testlib/hooks.py @@ -1,8 +1,8 @@ import pytest -@pytest.hookimpl -def pytest_collection_modifyitems(items: list[pytest.Item]): +@pytest.hookimpl(specname="pytest_collection_modifyitems") +def pytest_add_frostfs_marker(items: list[pytest.Item]): # All tests which reside in frostfs nodeid are granted with frostfs marker, excluding # nodeid = full path of the test # 1. plugins @@ -11,3 +11,18 @@ def pytest_collection_modifyitems(items: list[pytest.Item]): location = item.location[0] if "frostfs" in location and "plugin" not in location and "testlib" not in location: item.add_marker("frostfs") + + +# pytest hook. 
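
The assertion pattern changed throughout PATCH 249 above is subtle: `response.get("Groups")` is falsy when the field holds an empty list, so a perfectly valid empty listing used to fail the check, whereas `"Groups" in response.keys()` only verifies the field is present. In miniature (plain dict standing in for the parsed AWS response):

# A syntactically valid ListGroups response with no groups yet:
response = {"Groups": []}

assert "Groups" in response.keys()  # new check: the field exists, so it passes
assert not response.get("Groups")   # old check relied on truthiness: [] is falsy
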
Do not rename +@pytest.hookimpl(trylast=True) +def pytest_collection_modifyitems(items: list[pytest.Item]): + # Change order of tests based on @pytest.mark.order() marker + def order(item: pytest.Item) -> int: + order_marker = item.get_closest_marker("order") + if order_marker and (len(order_marker.args) != 1 or not isinstance(order_marker.args[0], int)): + raise RuntimeError("Incorrect usage of pytest.mark.order") + + order_value = order_marker.args[0] if order_marker else 0 + return order_value + + items.sort(key=lambda item: order(item)) From 0015ea7f93a1a102cd08fbbd5276bc9ca508c620 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Thu, 23 Jan 2025 17:46:47 +0300 Subject: [PATCH 251/274] [#350] Add ape rule for load config Signed-off-by: a.berezin --- src/frostfs_testlib/load/load_config.py | 4 ++- tests/test_load_config.py | 39 +++++++++++++++++++++++-- 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 15103e0..3830203 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -182,8 +182,10 @@ class Preset(MetaConfig): pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False) # Workers count for preset workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False) - # Acl for container/buckets + # TODO: Deprecated. Acl for container/buckets acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False) + # APE rule for containers instead of deprecated ACL + rule: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "rule", None, False, formatter=force_list) # ------ GRPC ------ # Amount of containers which should be created diff --git a/tests/test_load_config.py b/tests/test_load_config.py index 883b1f2..fbeb587 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -6,10 +6,7 @@ import pytest from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType, Preset, ReadFrom from frostfs_testlib.load.runners import DefaultRunner from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME -from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController -from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode -from frostfs_testlib.storage.dataclasses.node_base import NodeBase @dataclass @@ -129,6 +126,8 @@ class TestLoadConfig: "--size '11'", "--acl 'acl'", "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -161,6 +160,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '11'", "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -317,6 +318,8 @@ class TestLoadConfig: "--no-verify-ssl", "--size '11'", "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -350,6 +353,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '11'", "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -415,6 +420,26 @@ class TestLoadConfig: self._check_preset_params(load_params, params) + @pytest.mark.parametrize( + "load_type, input, value, 
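
The `trylast` hook added in PATCH 250 just above reorders collected tests by an optional `order` marker: unmarked tests default to 0, and Python's stable sort preserves their relative order. Usage would look roughly like this (assuming the `order` marker is registered in the project's pytest configuration to avoid unknown-marker warnings):

import pytest

@pytest.mark.order(1)
def test_cleanup():  # sorts after everything unmarked (default order is 0)
    pass

def test_regular():  # no marker: keeps its position among the order-0 tests
    pass

@pytest.mark.order(-1)
def test_bootstrap():  # negative values run before the defaults
    pass
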
params", + [ + (LoadType.gRPC, ["A C ", " B"], ["A C", "B"], [f"--rule 'A C' --rule 'B'"]), + (LoadType.gRPC, " A ", ["A"], ["--rule 'A'"]), + (LoadType.gRPC, " A , B ", ["A , B"], ["--rule 'A , B'"]), + (LoadType.gRPC, [" A", "B "], ["A", "B"], ["--rule 'A' --rule 'B'"]), + (LoadType.gRPC, None, None, []), + (LoadType.S3, ["A C ", " B"], ["A C", "B"], []), + (LoadType.S3, None, None, []), + ], + ) + def test_ape_list_parsing_formatter(self, load_type, input, value, params): + load_params = LoadParams(load_type) + load_params.preset = Preset() + load_params.preset.rule = input + assert load_params.preset.rule == value + + self._check_preset_params(load_params, params) + @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True) def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): expected_env_vars = { @@ -444,6 +469,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", + "--retry '0'", + "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -475,6 +502,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", + "--retry '0'", + "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -582,6 +611,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", + "--retry '0'", + "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -613,6 +644,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", + "--retry '0'", + "--rule ''", "--out ''", "--workers '0'", "--containers '0'", From ace9564243b8e7c4740c296dcfe0f55a06e719cd Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Thu, 30 Jan 2025 11:16:23 +0300 Subject: [PATCH 252/274] [#352] Fix versions parsing Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/utils/version_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index 490abb0..0676085 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -64,7 +64,7 @@ def parallel_binary_verions(host: Host) -> dict[str, str]: try: result = shell.exec(f"{binary_path} {binary['param']}") version = parse_version(result.stdout) or parse_version(result.stderr) or "Unknown" - versions_at_host[binary_name] = version + versions_at_host[binary_name] = version.strip() except Exception as exc: logger.error(f"Cannot get version for {binary_path} because of\n{exc}") versions_at_host[binary_name] = "Unknown" From b44705eb2fd23ca0db313b07e8b5616367ce0d8f Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 30 Jan 2025 14:38:22 +0300 Subject: [PATCH 253/274] [#353] Added Netmap command for CliWrapper Signed-off-by: Dmitriy Zayakin --- requirements.txt | 1 + src/frostfs_testlib/cli/frostfs_cli/netmap.py | 4 + src/frostfs_testlib/cli/netmap_parser.py | 29 +- .../dataclasses/storage_object_info.py | 36 +- .../grpc_operations/client_wrappers.py | 12 +- .../implementations/__init__.py | 4 + .../grpc_operations/implementations/netmap.py | 171 +++++++ .../storage/grpc_operations/interfaces.py | 424 ------------------ .../grpc_operations/interfaces/__init__.py | 4 + .../grpc_operations/interfaces/chunks.py | 79 ++++ .../grpc_operations/interfaces/container.py | 125 ++++++ .../grpc_operations/interfaces/netmap.py | 89 ++++ .../grpc_operations/interfaces/object.py | 223 +++++++++ .../grpc_operations/interfaces_wrapper.py | 10 + 14 
files changed, 770 insertions(+), 441 deletions(-) create mode 100644 src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py delete mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces/container.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces/object.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py diff --git a/requirements.txt b/requirements.txt index e012366..a0bcc11 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,6 +10,7 @@ tenacity==8.0.1 pytest==7.1.2 boto3==1.35.30 boto3-stubs[essential]==1.35.30 +pydantic==2.10.6 # Dev dependencies black==22.8.0 diff --git a/src/frostfs_testlib/cli/frostfs_cli/netmap.py b/src/frostfs_testlib/cli/frostfs_cli/netmap.py index d219940..cd197d3 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/netmap.py +++ b/src/frostfs_testlib/cli/frostfs_cli/netmap.py @@ -12,6 +12,7 @@ class FrostfsCliNetmap(CliCommand): address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -42,6 +43,7 @@ class FrostfsCliNetmap(CliCommand): address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -73,6 +75,7 @@ class FrostfsCliNetmap(CliCommand): generate_key: bool = False, json: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -104,6 +107,7 @@ class FrostfsCliNetmap(CliCommand): address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py index db6f55f..2c97c3a 100644 --- a/src/frostfs_testlib/cli/netmap_parser.py +++ b/src/frostfs_testlib/cli/netmap_parser.py @@ -1,7 +1,7 @@ import re from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeNetInfo, NodeNetmapInfo, NodeStatus +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeInfo, NodeNetInfo, NodeNetmapInfo, NodeStatus class NetmapParser: @@ -20,8 +20,6 @@ class NetmapParser: "withdrawal_fee": r"Withdrawal fee: (?P\d+)", "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?Ptrue|false)", "maintenance_mode_allowed": r"Maintenance mode allowed: (?Ptrue|false)", - "eigen_trust_alpha": r"EigenTrustAlpha: (?P\d+\w+$)", - "eigen_trust_iterations": r"EigenTrustIterations: (?P\d+)", } parse_result = {} @@ -64,7 +62,7 @@ class NetmapParser: for node in netmap_nodes: for key, regex in regexes.items(): search_result = re.search(regex, node, flags=re.MULTILINE) - if search_result == None: + if search_result is None: result_netmap[key] = None continue if key == "node_data_ips": @@ -83,9 +81,22 @@ class NetmapParser: return dataclasses_netmap @staticmethod - def 
snapshot_one_node(output: str, cluster_node: ClusterNode) -> NodeNetmapInfo | None: + def snapshot_one_node(output: str, rpc_endpoint: str) -> NodeNetmapInfo | None: snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output) - snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.get_interface(Interfaces.MGMT)] - if not snapshot_node: - return None - return snapshot_node[0] + for snapshot in snapshot_nodes: + for endpoint in snapshot.external_address: + if rpc_endpoint.split(":")[0] in endpoint: + return snapshot + + @staticmethod + def node_info(output: dict) -> NodeNetmapInfo: + data_dict = {"attributes": {}} + + for key, value in output.items(): + if key != "attributes": + data_dict[key] = value + + for attribute in output["attributes"]: + data_dict["attributes"][attribute["key"]] = attribute["value"] + + return NodeInfo(**data_dict) diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index 55a8388..4c303fc 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -1,6 +1,9 @@ +import re from dataclasses import dataclass from typing import Optional +from pydantic import BaseModel, Field, field_validator + from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.readable import HumanReadableEnum @@ -75,8 +78,37 @@ class NodeNetInfo: withdrawal_fee: str = None homomorphic_hashing_disabled: str = None maintenance_mode_allowed: str = None - eigen_trust_alpha: str = None - eigen_trust_iterations: str = None + + +class Attributes(BaseModel): + cluster_name: str = Field(alias="ClusterName") + continent: str = Field(alias="Continent") + country: str = Field(alias="Country") + country_code: str = Field(alias="CountryCode") + external_addr: list[str] = Field(alias="ExternalAddr") + location: str = Field(alias="Location") + node: str = Field(alias="Node") + subdiv: str = Field(alias="SubDiv") + subdiv_code: str = Field(alias="SubDivCode") + un_locode: str = Field(alias="UN-LOCODE") + role: str = Field(alias="role") + + @field_validator("external_addr", mode="before") + @classmethod + def convert_external_addr(cls, value: str) -> list[str]: + return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", value)] + + +class NodeInfo(BaseModel): + public_key: str = Field(alias="publicKey") + addresses: list[str] = Field(alias="addresses") + state: str = Field(alias="state") + attributes: Attributes = Field(alias="attributes") + + @field_validator("addresses", mode="before") + @classmethod + def convert_external_addr(cls, value: str) -> list[str]: + return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", ",".join(value))] @dataclass diff --git a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py index 8cef23b..c1e3a31 100644 --- a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py +++ b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py @@ -1,14 +1,14 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.storage.grpc_operations import interfaces -from frostfs_testlib.storage.grpc_operations.implementations import container, object +from frostfs_testlib.storage.grpc_operations import implementations, interfaces, interfaces_wrapper -class 
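
The `before`-mode validators in the new pydantic models of PATCH 253 above do the heavy lifting: they rewrite multiaddr strings like `/ip4/<ip>/tcp/<port>` into plain `host:port` pairs before field validation runs. The same validator isolated into a runnable miniature (the model name here is invented, the regex is the patch's own, pydantic v2 as pinned in requirements.txt):

import re
from pydantic import BaseModel, field_validator

class Endpoints(BaseModel):
    addresses: list[str]

    @field_validator("addresses", mode="before")
    @classmethod
    def convert(cls, value: list[str]) -> list[str]:
        # /ip4/<ip>/tcp/<port> and /ip4/<ip>/tls/<port> both become "<ip>:<port>"
        return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", ",".join(value))]

print(Endpoints(addresses=["/ip4/10.78.0.3/tcp/8080", "/ip4/10.78.0.3/tls/8082"]).addresses)
# ['10.78.0.3:8080', '10.78.0.3:8082']
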
CliClientWrapper(interfaces.GrpcClientWrapper): +class CliClientWrapper(interfaces_wrapper.GrpcClientWrapper): def __init__(self, cli: FrostfsCli) -> None: self.cli = cli - self.object: interfaces.ObjectInterface = object.ObjectOperations(self.cli) - self.container: interfaces.ContainerInterface = container.ContainerOperations(self.cli) + self.object: interfaces.ObjectInterface = implementations.ObjectOperations(self.cli) + self.container: interfaces.ContainerInterface = implementations.ContainerOperations(self.cli) + self.netmap: interfaces.NetmapInterface = implementations.NetmapOperations(self.cli) -class RpcClientWrapper(interfaces.GrpcClientWrapper): +class RpcClientWrapper(interfaces_wrapper.GrpcClientWrapper): pass # The next series diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py index e69de29..18e8ae5 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py @@ -0,0 +1,4 @@ +from .chunks import ChunksOperations +from .container import ContainerOperations +from .netmap import NetmapOperations +from .object import ObjectOperations diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py new file mode 100644 index 0000000..905171b --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py @@ -0,0 +1,171 @@ +import json as module_json +from typing import List, Optional + +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.cli.netmap_parser import NetmapParser +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo + +from .. import interfaces + + +class NetmapOperations(interfaces.NetmapInterface): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + + def epoch( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> int: + """ + Get current epoch number. + """ + output = ( + self.cli.netmap.epoch( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return int(output) + + def netinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> NodeNetInfo: + """ + Get target node info. 
+ """ + output = ( + self.cli.netmap.netinfo( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.netinfo(output) + + def nodeinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + json: bool = True, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> NodeNetmapInfo: + """ + Get target node info. + """ + output = ( + self.cli.netmap.nodeinfo( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + json=json, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.node_info(module_json.loads(output)) + + def snapshot( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[NodeNetmapInfo]: + """ + Get target node info. + """ + output = ( + self.cli.netmap.snapshot( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.snapshot_all_nodes(output) + + def snapshot_one_node( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[NodeNetmapInfo]: + """ + Get target one node info. 
+ """ + output = ( + self.cli.netmap.snapshot( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.snapshot_one_node(output, rpc_endpoint) diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces.py b/src/frostfs_testlib/storage/grpc_operations/interfaces.py deleted file mode 100644 index 07fe52f..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces.py +++ /dev/null @@ -1,424 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any, List, Optional - -from frostfs_testlib.shell.interfaces import CommandResult -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.constants import PlacementRule -from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo -from frostfs_testlib.utils import file_utils - - -class ChunksInterface(ABC): - @abstractmethod - def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: - pass - - @abstractmethod - def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: - pass - - @abstractmethod - def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: - pass - - @abstractmethod - def get_all( - self, - rpc_endpoint: str, - cid: str, - oid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> list[Chunk]: - pass - - @abstractmethod - def get_parity( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> Chunk: - pass - - @abstractmethod - def get_first_data( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> Chunk: - pass - - -class ObjectInterface(ABC): - def __init__(self) -> None: - self.chunks: ChunksInterface - - @abstractmethod - def delete( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def get( - self, - cid: str, - oid: str, - endpoint: str, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> file_utils.TestFile: - pass - - @abstractmethod - def get_from_random_node( - self, - cid: str, - oid: str, - cluster: Cluster, - bearer: Optional[str] = None, - write_object: Optional[str] = 
None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def hash( - self, - endpoint: str, - cid: str, - oid: str, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - range: Optional[str] = None, - salt: Optional[str] = None, - ttl: Optional[int] = None, - session: Optional[str] = None, - hash_type: Optional[str] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def head( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - json_output: bool = True, - is_raw: bool = False, - is_direct: bool = False, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult | Any: - pass - - @abstractmethod - def lock( - self, - cid: str, - oid: str, - endpoint: str, - lifetime: Optional[int] = None, - expire_at: Optional[int] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def put( - self, - path: str, - cid: str, - endpoint: str, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def patch( - self, - cid: str, - oid: str, - endpoint: str, - ranges: Optional[list[str]] = None, - payloads: Optional[list[str]] = None, - new_attrs: Optional[str] = None, - replace_attrs: bool = False, - bearer: Optional[str] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - trace: bool = False, - ) -> str: - pass - - @abstractmethod - def put_to_random_node( - self, - path: str, - cid: str, - cluster: Cluster, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def range( - self, - cid: str, - oid: str, - range_cut: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> tuple[file_utils.TestFile, bytes]: - pass - - @abstractmethod - def search( - self, - cid: str, - endpoint: str, - bearer: str = "", - oid: Optional[str] = None, - filters: Optional[dict] = None, - expected_objects_list: Optional[list] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - phy: bool = False, - root: bool = False, - timeout: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - ttl: Optional[int] = None, - ) -> List: - pass - - @abstractmethod - def nodes( - self, - cluster: Cluster, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = None, - ) -> List[ClusterNode]: - pass - - @abstractmethod - def parts( - self, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: 
Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = None, - ) -> List[str]: - pass - - -class ContainerInterface(ABC): - @abstractmethod - def create( - self, - endpoint: str, - nns_zone: Optional[str] = None, - nns_name: Optional[str] = None, - address: Optional[str] = None, - attributes: Optional[dict] = None, - basic_acl: Optional[str] = None, - await_mode: bool = False, - disable_timestamp: bool = False, - force: bool = False, - trace: bool = False, - name: Optional[str] = None, - nonce: Optional[str] = None, - policy: Optional[str] = None, - session: Optional[str] = None, - subnet: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - """ - Create a new container and register it in the FrostFS. - It will be stored in the sidechain when the Inner Ring accepts it. - """ - raise NotImplementedError("No implemethed method create") - - @abstractmethod - def delete( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - await_mode: bool = False, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - force: bool = False, - trace: bool = False, - ) -> List[str]: - """ - Delete an existing container. - Only the owner of the container has permission to remove the container. - """ - raise NotImplementedError("No implemethed method delete") - - @abstractmethod - def get( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - to: Optional[str] = None, - json_mode: bool = True, - trace: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[str]: - """Get container field info.""" - raise NotImplementedError("No implemethed method get") - - @abstractmethod - def get_eacl( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - json_mode: bool = True, - trace: bool = False, - to: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[str]: - """Get extended ACL table of container.""" - raise NotImplementedError("No implemethed method get-eacl") - - @abstractmethod - def list( - self, - endpoint: str, - name: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - owner: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - **params, - ) -> List[str]: - """List all created containers.""" - raise NotImplementedError("No implemethed method list") - - @abstractmethod - def nodes( - self, - endpoint: str, - cid: str, - cluster: Cluster, - address: Optional[str] = None, - ttl: Optional[int] = None, - from_file: Optional[str] = None, - trace: bool = False, - short: Optional[bool] = True, - xhdr: Optional[dict] = None, - generate_key: Optional[bool] = None, - timeout: Optional[str] = None, - ) -> List[ClusterNode]: - """Show the nodes participating in the container in the current epoch.""" - raise NotImplementedError("No implemethed method nodes") - - -class GrpcClientWrapper(ABC): - def __init__(self) -> None: - self.object: ObjectInterface - self.container: ContainerInterface diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py 
b/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py new file mode 100644 index 0000000..17b3e9c --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py @@ -0,0 +1,4 @@ +from .chunks import ChunksInterface +from .container import ContainerInterface +from .netmap import NetmapInterface +from .object import ObjectInterface diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py new file mode 100644 index 0000000..986b938 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py @@ -0,0 +1,79 @@ +from abc import ABC, abstractmethod +from typing import Optional + +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo + + +class ChunksInterface(ABC): + @abstractmethod + def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: + pass + + @abstractmethod + def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: + pass + + @abstractmethod + def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: + pass + + @abstractmethod + def get_all( + self, + rpc_endpoint: str, + cid: str, + oid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> list[Chunk]: + pass + + @abstractmethod + def get_parity( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + pass + + @abstractmethod + def get_first_data( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py new file mode 100644 index 0000000..d5e3eeb --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py @@ -0,0 +1,125 @@ +from abc import ABC, abstractmethod +from typing import List, Optional + +from frostfs_testlib.storage.cluster import Cluster, ClusterNode + + +class ContainerInterface(ABC): + @abstractmethod + def create( + self, + endpoint: str, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, + address: Optional[str] = None, + attributes: Optional[dict] = None, + basic_acl: Optional[str] = None, + await_mode: bool = False, + disable_timestamp: bool = False, + force: bool = False, + trace: bool = False, + name: Optional[str] = None, + nonce: Optional[str] = None, + policy: 
Optional[str] = None, + session: Optional[str] = None, + subnet: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> str: + """ + Create a new container and register it in the FrostFS. + It will be stored in the sidechain when the Inner Ring accepts it. + """ + raise NotImplementedError("No implemethed method create") + + @abstractmethod + def delete( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + await_mode: bool = False, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + force: bool = False, + trace: bool = False, + ) -> List[str]: + """ + Delete an existing container. + Only the owner of the container has permission to remove the container. + """ + raise NotImplementedError("No implemethed method delete") + + @abstractmethod + def get( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + to: Optional[str] = None, + json_mode: bool = True, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get container field info.""" + raise NotImplementedError("No implemethed method get") + + @abstractmethod + def get_eacl( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + json_mode: bool = True, + trace: bool = False, + to: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get extended ACL table of container.""" + raise NotImplementedError("No implemethed method get-eacl") + + @abstractmethod + def list( + self, + endpoint: str, + name: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + owner: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + **params, + ) -> List[str]: + """List all created containers.""" + raise NotImplementedError("No implemethed method list") + + @abstractmethod + def nodes( + self, + endpoint: str, + cid: str, + cluster: Cluster, + address: Optional[str] = None, + ttl: Optional[int] = None, + from_file: Optional[str] = None, + trace: bool = False, + short: Optional[bool] = True, + xhdr: Optional[dict] = None, + generate_key: Optional[bool] = None, + timeout: Optional[str] = None, + ) -> List[ClusterNode]: + """Show the nodes participating in the container in the current epoch.""" + raise NotImplementedError("No implemethed method nodes") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py new file mode 100644 index 0000000..3f0a341 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py @@ -0,0 +1,89 @@ +from abc import ABC, abstractmethod +from typing import List, Optional + +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo + + +class NetmapInterface(ABC): + @abstractmethod + def epoch( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = False, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> int: + """ + Get current 
epoch number. + """ + raise NotImplementedError("No implemethed method epoch") + + @abstractmethod + def netinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> NodeNetInfo: + """ + Get target node info. + """ + raise NotImplementedError("No implemethed method netinfo") + + @abstractmethod + def nodeinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> NodeNetmapInfo: + """ + Get target node info. + """ + raise NotImplementedError("No implemethed method nodeinfo") + + @abstractmethod + def snapshot( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[NodeNetmapInfo]: + """ + Get target node info. + """ + raise NotImplementedError("No implemethed method snapshot") + + @abstractmethod + def snapshot_one_node( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[NodeNetmapInfo]: + """ + Get target one node info. + """ + raise NotImplementedError("No implemethed method snapshot") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py new file mode 100644 index 0000000..550c461 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py @@ -0,0 +1,223 @@ +from abc import ABC, abstractmethod +from typing import Any, List, Optional + +from frostfs_testlib.shell.interfaces import CommandResult +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.utils import file_utils + +from .chunks import ChunksInterface + + +class ObjectInterface(ABC): + def __init__(self) -> None: + self.chunks: ChunksInterface + + @abstractmethod + def delete( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def get( + self, + cid: str, + oid: str, + endpoint: str, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> file_utils.TestFile: + pass + + @abstractmethod + def get_from_random_node( + self, + cid: str, + oid: str, + cluster: Cluster, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def hash( + self, + endpoint: str, + cid: str, + oid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + range: Optional[str] = None, + salt: Optional[str] = None, + ttl: Optional[int] = None, + session: Optional[str] = None, + hash_type: Optional[str] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] 
= None, + ) -> str: + pass + + @abstractmethod + def head( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + json_output: bool = True, + is_raw: bool = False, + is_direct: bool = False, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult | Any: + pass + + @abstractmethod + def lock( + self, + cid: str, + oid: str, + endpoint: str, + lifetime: Optional[int] = None, + expire_at: Optional[int] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def put( + self, + path: str, + cid: str, + endpoint: str, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def patch( + self, + cid: str, + oid: str, + endpoint: str, + ranges: Optional[list[str]] = None, + payloads: Optional[list[str]] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + bearer: Optional[str] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + trace: bool = False, + ) -> str: + pass + + @abstractmethod + def put_to_random_node( + self, + path: str, + cid: str, + cluster: Cluster, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def range( + self, + cid: str, + oid: str, + range_cut: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> tuple[file_utils.TestFile, bytes]: + pass + + @abstractmethod + def search( + self, + cid: str, + endpoint: str, + bearer: str = "", + oid: Optional[str] = None, + filters: Optional[dict] = None, + expected_objects_list: Optional[list] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + phy: bool = False, + root: bool = False, + timeout: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + ttl: Optional[int] = None, + ) -> List: + pass + + @abstractmethod + def nodes( + self, + cluster: Cluster, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = None, + ) -> List[ClusterNode]: + pass + + @abstractmethod + def parts( + self, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = None, + ) -> List[str]: + pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py b/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py new file mode 100644 index 0000000..6574012 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py @@ -0,0 +1,10 @@ +from abc import ABC + +from . 
import interfaces + + +class GrpcClientWrapper(ABC): + def __init__(self) -> None: + self.object: interfaces.ObjectInterface + self.container: interfaces.ContainerInterface + self.netmap: interfaces.NetmapInterface From 87afc4b58c070d35643f95efd0e5db27eeb6fab6 Mon Sep 17 00:00:00 2001 From: Dmitry Anurin Date: Tue, 4 Feb 2025 10:03:58 +0300 Subject: [PATCH 254/274] [#356] Added pprof endpoint and working dir to service attributes Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/storage/constants.py | 2 ++ src/frostfs_testlib/storage/dataclasses/node_base.py | 10 ++++++++++ 2 files changed, 12 insertions(+) diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 39c6b66..2e49208 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -5,6 +5,7 @@ class ConfigAttributes: WALLET_CONFIG = "wallet_config" CONFIG_DIR = "service_config_dir" CONFIG_PATH = "config_path" + WORKING_DIR = "working_dir" SHARD_CONFIG_PATH = "shard_config_path" LOGGER_CONFIG_PATH = "logger_config_path" LOCAL_WALLET_PATH = "local_wallet_path" @@ -15,6 +16,7 @@ class ConfigAttributes: ENDPOINT_DATA_0_NS = "endpoint_data0_namespace" ENDPOINT_INTERNAL = "endpoint_internal0" ENDPOINT_PROMETHEUS = "endpoint_prometheus" + ENDPOINT_PPROF = "endpoint_pprof" CONTROL_ENDPOINT = "control_endpoint" UN_LOCODE = "un_locode" diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 180877d..5c8b723 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -82,6 +82,9 @@ class NodeBase(HumanReadableABC): def get_metrics_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS) + def get_pprof_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_PPROF) + def stop_service(self, mask: bool = True): if mask: with reporter.step(f"Mask {self.name} service on {self.host.config.address}"): @@ -144,6 +147,13 @@ class NodeBase(HumanReadableABC): else None ) + def get_working_dir_path(self) -> Optional[str]: + """ + Returns working directory path located on remote host + """ + config_attributes = self.host.get_service_config(self.name) + return self._get_attribute(ConfigAttributes.WORKING_DIR) if ConfigAttributes.WORKING_DIR in config_attributes.attributes else None + @property def config_dir(self) -> str: return self._get_attribute(ConfigAttributes.CONFIG_DIR) From e9bc36b3d3063043e2b754fbccbde53e93e3785a Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 4 Feb 2025 16:39:34 +0300 Subject: [PATCH 255/274] [#355] Change CSC time methods Signed-off-by: Dmitriy Zayakin --- .../controllers/cluster_state_controller.py | 34 +++++-------------- 1 file changed, 8 insertions(+), 26 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 3a10ded..6370033 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,7 +1,7 @@ -import datetime import itertools import logging import time +from datetime import datetime, timezone from typing import TypeVar import frostfs_testlib.resources.optionals as optionals @@ -390,31 +390,23 @@ class ClusterStateController: @reporter.step("Get node time") def get_node_date(self, node: ClusterNode) -> datetime: shell = 
node.host.get_shell() - return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z") + return datetime.strptime(shell.exec('date +"%Y-%m-%d %H:%M:%S"').stdout.strip(), "%Y-%m-%d %H:%M:%S") @reporter.step("Set node time to {in_date}") def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: shell = node.host.get_shell() - shell.exec(f"date -s @{time.mktime(in_date.timetuple())}") - shell.exec("hwclock --systohc") + in_date_frmt = in_date.strftime("%Y-%m-%d %H:%M:%S") + shell.exec(f"timedatectl set-time '{in_date_frmt}'") node_time = self.get_node_date(node) with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): - assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1) + assert (node_time - in_date).total_seconds() < 60 - @reporter.step(f"Restore time") + @reporter.step("Restore time") def restore_node_date(self, node: ClusterNode) -> None: shell = node.host.get_shell() - now_time = datetime.datetime.now(datetime.timezone.utc) + now_time = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S") with reporter.step(f"Set {now_time} time"): - shell.exec(f"date -s @{time.mktime(now_time.timetuple())}") - shell.exec("hwclock --systohc") - - @reporter.step("Change the synchronizer status to {status}") - def set_sync_date_all_nodes(self, status: str): - if status == "active": - parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes) - return - parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes) + shell.exec(f"timedatectl set-time '{now_time}'") @reporter.step("Set MaintenanceModeAllowed - {status}") def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: @@ -500,16 +492,6 @@ class ClusterStateController: frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path) return frostfs_adm, frostfs_cli, frostfs_cli_remote - def _enable_date_synchronizer(self, cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() - shell.exec("timedatectl set-ntp true") - cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 15) - - def _disable_date_synchronizer(self, cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() - shell.exec("timedatectl set-ntp false") - cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 15) - def _get_disk_controller(self, node: StorageNode, device: str, mountpoint: str) -> DiskController: disk_controller_id = DiskController.get_id(node, device) if disk_controller_id in self.detached_disks.keys(): From 97b9b5498af883d2dd111aa17b916d2aba36429e Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 21 Feb 2025 16:27:13 +0300 Subject: [PATCH 256/274] [#358] Add minor improvements for convenient work with clients Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/clients/http/__init__.py | 1 + src/frostfs_testlib/clients/s3/__init__.py | 4 ++- .../clients/s3/aws_cli_client.py | 6 +++-- .../clients/s3/boto3_client.py | 20 ++++++-------- src/frostfs_testlib/clients/s3/interfaces.py | 26 ++++++++++++------- .../resources/error_patterns.py | 1 + 6 files changed, 34 insertions(+), 24 deletions(-) diff --git a/src/frostfs_testlib/clients/http/__init__.py b/src/frostfs_testlib/clients/http/__init__.py index e69de29..ab6e2b0 100644 --- a/src/frostfs_testlib/clients/http/__init__.py +++ b/src/frostfs_testlib/clients/http/__init__.py @@ -0,0 +1 @@ +from 
frostfs_testlib.clients.http.http_client import HttpClient diff --git a/src/frostfs_testlib/clients/s3/__init__.py b/src/frostfs_testlib/clients/s3/__init__.py index 65a3990..5481f48 100644 --- a/src/frostfs_testlib/clients/s3/__init__.py +++ b/src/frostfs_testlib/clients/s3/__init__.py @@ -1 +1,3 @@ -from frostfs_testlib.clients.s3.interfaces import BucketContainerResolver, S3ClientWrapper, VersioningStatus +from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient +from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper +from frostfs_testlib.clients.s3.interfaces import ACL, BucketContainerResolver, S3ClientWrapper, VersioningStatus diff --git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py index accc289..8b2d774 100644 --- a/src/frostfs_testlib/clients/s3/aws_cli_client.py +++ b/src/frostfs_testlib/clients/s3/aws_cli_client.py @@ -33,12 +33,14 @@ class AwsCliClient(S3ClientWrapper): self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" ) -> None: self.s3gate_endpoint = s3gate_endpoint + self.iam_endpoint = None + self.access_key_id: str = access_key_id self.secret_access_key: str = secret_access_key self.profile = profile - self.local_shell = LocalShell() self.region = region - self.iam_endpoint = None + + self.local_shell = LocalShell() try: _configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key, region) self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}") diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index 890b4e9..9d9fefe 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -35,26 +35,20 @@ class Boto3ClientWrapper(S3ClientWrapper): def __init__( self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" ) -> None: - self.boto3_client: S3Client = None self.s3gate_endpoint: str = "" + self.boto3_client: S3Client = None - self.boto3_iam_client: S3Client = None self.iam_endpoint: str = "" - + self.boto3_iam_client: S3Client = None self.boto3_sts_client: S3Client = None - self.access_key_id: str = access_key_id - self.secret_access_key: str = secret_access_key + self.access_key_id = access_key_id + self.secret_access_key = secret_access_key self.profile = profile self.region = region self.session = boto3.Session() - self.config = Config( - retries={ - "max_attempts": MAX_REQUEST_ATTEMPTS, - "mode": RETRY_MODE, - } - ) + self.config = Config(retries={"max_attempts": MAX_REQUEST_ATTEMPTS, "mode": RETRY_MODE}) self.set_endpoint(s3gate_endpoint) @@ -90,7 +84,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint_url=self.iam_endpoint, verify=False, ) - # since the STS does not have an enpoint, IAM is used + # since the STS does not have an endpoint, IAM is used self.boto3_sts_client = self.session.client( service_name="sts", aws_access_key_id=self.access_key_id, @@ -145,6 +139,7 @@ class Boto3ClientWrapper(S3ClientWrapper): params = {"Bucket": bucket} if object_lock_enabled_for_bucket is not None: params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket}) + if acl is not None: params.update({"ACL": acl}) elif grant_write or grant_read or grant_full_control: @@ -154,6 +149,7 @@ class Boto3ClientWrapper(S3ClientWrapper): params.update({"GrantRead": 
grant_read}) elif grant_full_control: params.update({"GrantFullControl": grant_full_control}) + if location_constraint: params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) diff --git a/src/frostfs_testlib/clients/s3/interfaces.py b/src/frostfs_testlib/clients/s3/interfaces.py index 7ce9f31..d636182 100644 --- a/src/frostfs_testlib/clients/s3/interfaces.py +++ b/src/frostfs_testlib/clients/s3/interfaces.py @@ -22,15 +22,15 @@ class VersioningStatus(HumanReadableEnum): SUSPENDED = "Suspended" -ACL_COPY = [ - "private", - "public-read", - "public-read-write", - "authenticated-read", - "aws-exec-read", - "bucket-owner-read", - "bucket-owner-full-control", -] +class ACL: + PRIVATE = "private" + PUBLIC_READ = "public-read" + PUBLIC_READ_WRITE = "public-read-write" + AUTHENTICATED_READ = "authenticated-read" + AWS_EXEC_READ = "aws-exec-read" + BUCKET_OWNER_READ = "bucket-owner-read" + BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control" + LOG_DELIVERY_WRITE = "log-delivery-write" class BucketContainerResolver(ABC): @@ -50,6 +50,14 @@ class BucketContainerResolver(ABC): class S3ClientWrapper(HumanReadableABC): + access_key_id: str + secret_access_key: str + profile: str + region: str + + s3gate_endpoint: str + iam_endpoint: str + @abstractmethod def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None: pass diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 4c22648..6c0cb14 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -1,5 +1,6 @@ # Regex patterns of status codes of Container service CONTAINER_NOT_FOUND = "code = 3072.*message = container not found" +SUBJECT_NOT_FOUND = "code = 1024.*message = frostfs error: chain/client.*subject not found.*" # Regex patterns of status codes of Object service MALFORMED_REQUEST = "code = 1024.*message = malformed request" From b00d080982804c8c9237a49a606dbf6fc4ef03f1 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Tue, 25 Feb 2025 16:43:34 +0300 Subject: [PATCH 257/274] [#357] Synchronize client and CliCommand timeouts Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/cli/cli_command.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/frostfs_testlib/cli/cli_command.py b/src/frostfs_testlib/cli/cli_command.py index 3600e77..7fccc65 100644 --- a/src/frostfs_testlib/cli/cli_command.py +++ b/src/frostfs_testlib/cli/cli_command.py @@ -24,9 +24,7 @@ class CliCommand: def __init__(self, shell: Shell, cli_exec_path: str, **base_params): self.shell = shell self.cli_exec_path = cli_exec_path - self.__base_params = " ".join( - [f"--{param} {value}" for param, value in base_params.items() if value] - ) + self.__base_params = " ".join([f"--{param} {value}" for param, value in base_params.items() if value]) def _format_command(self, command: str, **params) -> str: param_str = [] @@ -48,9 +46,7 @@ class CliCommand: val_str = str(value_item).replace("'", "\\'") param_str.append(f"--{param} '{val_str}'") elif isinstance(value, dict): - param_str.append( - f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'' - ) + param_str.append(f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'') else: if "'" in str(value): value_str = str(value).replace('"', '\\"') @@ -63,12 +59,18 @@ class CliCommand: return f"{self.cli_exec_path} 
{self.__base_params} {command or ''} {param_str}" def _execute(self, command: Optional[str], **params) -> CommandResult: - return self.shell.exec(self._format_command(command, **params)) - - def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult: + timeout = int(params["timeout"].rstrip("s")) if params.get("timeout") else None return self.shell.exec( self._format_command(command, **params), - options=CommandOptions( - interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)] + CommandOptions(timeout=timeout), + ) + + def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult: + timeout = int(params["timeout"].rstrip("s")) if params.get("timeout") else None + return self.shell.exec( + self._format_command(command, **params), + CommandOptions( + interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)], + timeout=timeout, ), ) From f1073d214cc300ede89cfd05907039511a1970f0 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Wed, 5 Mar 2025 15:29:35 +0300 Subject: [PATCH 258/274] [#360] Increased timeout for IAM policy attach/detach Signed-off-by: Yaroslava Lukoyanova --- src/frostfs_testlib/clients/s3/aws_cli_client.py | 12 ++++++------ src/frostfs_testlib/clients/s3/boto3_client.py | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py index 8b2d774..a2e3fc7 100644 --- a/src/frostfs_testlib/clients/s3/aws_cli_client.py +++ b/src/frostfs_testlib/clients/s3/aws_cli_client.py @@ -979,7 +979,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @@ -990,7 +990,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @@ -1122,7 +1122,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @@ -1133,7 +1133,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @@ -1352,7 +1352,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @@ -1367,7 +1367,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index 9d9fefe..4157bd6 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -836,7 +836,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response 
@reporter.step("Attaches the specified managed policy to the specified user") @@ -848,7 +848,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") @@ -979,7 +979,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @reporter.step("Removes the specified managed policy from the specified user") @@ -991,7 +991,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @reporter.step("Returns a list of IAM users that are in the specified IAM group") @@ -1201,7 +1201,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") @@ -1216,7 +1216,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @reporter.step("Removes the specified user from the specified group") From 0c4e601840d81ceef400e334b3d3bcd8bee4592e Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Mon, 3 Mar 2025 14:54:22 +0300 Subject: [PATCH 259/274] [#359] Override represantation method for Host Signed-off-by: Ilyas Niyazov --- src/frostfs_testlib/hosting/interfaces.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index f58d856..a41161c 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -29,6 +29,9 @@ class Host(ABC): self._service_config_by_name = {service_config.name: service_config for service_config in config.services} self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis} + def __repr__(self) -> str: + return self.config.address + @property def config(self) -> HostConfig: """Returns config of the host. 
From 7d2c92ebc096dc378666dce09d26cfd0a0313d2f Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Fri, 7 Mar 2025 15:18:43 +0300 Subject: [PATCH 260/274] [#361] Move common fixture to testlib Signed-off-by: a.berezin --- src/frostfs_testlib/__init__.py | 2 +- src/frostfs_testlib/fixtures.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index e557a79..4724a8b 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1,4 +1,4 @@ __version__ = "2.0.1" -from .fixtures import configure_testlib, hosting, temp_directory +from .fixtures import configure_testlib, hosting, session_start_time, temp_directory from .hooks import pytest_add_frostfs_marker, pytest_collection_modifyitems diff --git a/src/frostfs_testlib/fixtures.py b/src/frostfs_testlib/fixtures.py index d0f92f2..7d767d2 100644 --- a/src/frostfs_testlib/fixtures.py +++ b/src/frostfs_testlib/fixtures.py @@ -1,5 +1,6 @@ import logging import os +from datetime import datetime from importlib.metadata import entry_points import pytest @@ -11,6 +12,12 @@ from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE from frostfs_testlib.storage import get_service_registry +@pytest.fixture(scope="session", autouse=True) +def session_start_time(): + start_time = datetime.utcnow() + return start_time + + @pytest.fixture(scope="session") def configure_testlib(): reporter.get_reporter().register_handler(reporter.AllureHandler()) From c2af1bba5c300b1bb1758eaa19f687962ef98224 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 7 Mar 2025 18:14:38 +0300 Subject: [PATCH 261/274] [#362] Add functions to change date on nodes in `ClusterStateController` Signed-off-by: Kirill Sosnovskikh --- .../controllers/cluster_state_controller.py | 61 +++++++------------ 1 file changed, 22 insertions(+), 39 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 6370033..51aaefb 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -247,23 +247,20 @@ class ClusterStateController: if service_type == StorageNode: self.wait_after_storage_startup() - # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop all storage services on cluster") - def stop_all_storage_services(self, reversed_order: bool = False): - nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + @reporter.step("Restart {service_type} service on {node}") + def restart_service_of_type(self, node: ClusterNode, service_type: ServiceClass): + service = node.service(service_type) + service.restart_service() - for node in nodes: - self.stop_service_of_type(node, StorageNode) - - # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop all S3 gates on cluster") - def stop_all_s3_gates(self, reversed_order: bool = False): - nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + @reporter.step("Restart all {service_type} services") + def restart_services_of_type(self, service_type: type[ServiceClass]): + services = self.cluster.services(service_type) + parallel([service.restart_service for service in services]) - for node in nodes: - self.stop_service_of_type(node, S3Gate) + if service_type == 
StorageNode: + self.wait_after_storage_startup() # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @@ -277,30 +274,6 @@ class ClusterStateController: def start_storage_service(self, node: ClusterNode): self.start_service_of_type(node, StorageNode) - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start stopped storage services") - def start_stopped_storage_services(self): - self.start_stopped_services_of_type(StorageNode) - - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop s3 gate on {node}") - def stop_s3_gate(self, node: ClusterNode, mask: bool = True): - self.stop_service_of_type(node, S3Gate, mask) - - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start s3 gate on {node}") - def start_s3_gate(self, node: ClusterNode): - self.start_service_of_type(node, S3Gate) - - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start stopped S3 gates") - def start_stopped_s3_gates(self): - self.start_stopped_services_of_type(S3Gate) - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Suspend {process_name} service in {node}") def suspend_service(self, process_name: str, node: ClusterNode): @@ -392,19 +365,29 @@ class ClusterStateController: shell = node.host.get_shell() return datetime.strptime(shell.exec('date +"%Y-%m-%d %H:%M:%S"').stdout.strip(), "%Y-%m-%d %H:%M:%S") - @reporter.step("Set node time to {in_date}") + @reporter.step("Set time on nodes in {in_date}") + def change_date_on_all_nodes(self, cluster: Cluster, in_date: datetime) -> None: + parallel(self.change_node_date, cluster.cluster_nodes, in_date=in_date) + + @reporter.step("Set time on {node} to {in_date}") def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: shell = node.host.get_shell() in_date_frmt = in_date.strftime("%Y-%m-%d %H:%M:%S") shell.exec(f"timedatectl set-time '{in_date_frmt}'") node_time = self.get_node_date(node) + with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): assert (node_time - in_date).total_seconds() < 60 - @reporter.step("Restore time") + @reporter.step("Restore time on nodes") + def restore_date_on_all_nodes(self, cluster: Cluster) -> None: + parallel(self.restore_node_date, cluster.cluster_nodes) + + @reporter.step("Restore time on {node}") def restore_node_date(self, node: ClusterNode) -> None: shell = node.host.get_shell() now_time = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S") + with reporter.step(f"Set {now_time} time"): shell.exec(f"timedatectl set-time '{now_time}'") From dfb048fe519f6ab72d59453569ead9cf2e93cafa Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Tue, 11 Mar 2025 17:22:13 +0300 Subject: [PATCH 262/274] [#363] Add accounting for timeout inaccuracy between process and cli Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/cli/cli_command.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/cli/cli_command.py b/src/frostfs_testlib/cli/cli_command.py index 7fccc65..224e9e3 100644 --- a/src/frostfs_testlib/cli/cli_command.py +++ b/src/frostfs_testlib/cli/cli_command.py @@ -1,10 +1,11 @@ from typing import Optional from frostfs_testlib.shell import CommandOptions, CommandResult, InteractiveInput, Shell +from frostfs_testlib.utils.datetime_utils import parse_time class CliCommand: - + TIMEOUT_INACCURACY = 10 WALLET_SOURCE_ERROR_MSG = 
"Provide either wallet or wallet_config to specify wallet location" WALLET_PASSWD_ERROR_MSG = "Provide either wallet_password or wallet_config to specify password" @@ -59,14 +60,18 @@ class CliCommand: return f"{self.cli_exec_path} {self.__base_params} {command or ''} {param_str}" def _execute(self, command: Optional[str], **params) -> CommandResult: - timeout = int(params["timeout"].rstrip("s")) if params.get("timeout") else None + if timeout := params.get("timeout"): + timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY + return self.shell.exec( self._format_command(command, **params), CommandOptions(timeout=timeout), ) def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult: - timeout = int(params["timeout"].rstrip("s")) if params.get("timeout") else None + if timeout := params.get("timeout"): + timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY + return self.shell.exec( self._format_command(command, **params), CommandOptions( From 3966f65c95cbad9f5adc99d9c396178008409c37 Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Mon, 17 Mar 2025 16:24:36 +0300 Subject: [PATCH 263/274] [#364] Fixed hook order tests collection Signed-off-by: Ilyas Niyazov --- src/frostfs_testlib/hooks.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py index 1ada660..c56c75a 100644 --- a/src/frostfs_testlib/hooks.py +++ b/src/frostfs_testlib/hooks.py @@ -16,6 +16,9 @@ def pytest_add_frostfs_marker(items: list[pytest.Item]): # pytest hook. Do not rename @pytest.hookimpl(trylast=True) def pytest_collection_modifyitems(items: list[pytest.Item]): + # The order of running tests corresponded to the suites + items.sort(key=lambda item: item.nodeid) + # Change order of tests based on @pytest.mark.order() marker def order(item: pytest.Item) -> int: order_marker = item.get_closest_marker("order") From dcde9e15b104602f117e6ed352f30726601d8545 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Thu, 13 Mar 2025 16:53:42 +0300 Subject: [PATCH 264/274] [#365] Change type hint for `NetmapOperations.nodeinfo` Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/cli/netmap_parser.py | 2 +- .../storage/grpc_operations/implementations/netmap.py | 4 ++-- .../storage/grpc_operations/interfaces/netmap.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py index 2c97c3a..4b4a501 100644 --- a/src/frostfs_testlib/cli/netmap_parser.py +++ b/src/frostfs_testlib/cli/netmap_parser.py @@ -89,7 +89,7 @@ class NetmapParser: return snapshot @staticmethod - def node_info(output: dict) -> NodeNetmapInfo: + def node_info(output: dict) -> NodeInfo: data_dict = {"attributes": {}} for key, value in output.items(): diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py index 905171b..76ee69a 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py @@ -4,7 +4,7 @@ from typing import List, Optional from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.cli.netmap_parser import NetmapParser from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo +from frostfs_testlib.storage.dataclasses.storage_object_info 
import NodeInfo, NodeNetInfo, NodeNetmapInfo from .. import interfaces @@ -86,7 +86,7 @@ class NetmapOperations(interfaces.NetmapInterface): trace: Optional[bool] = True, xhdr: Optional[dict] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> NodeNetmapInfo: + ) -> NodeInfo: """ Get target node info. """ diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py index 3f0a341..3fdc98a 100644 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py @@ -1,7 +1,7 @@ from abc import ABC, abstractmethod from typing import List, Optional -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeInfo, NodeNetInfo, NodeNetmapInfo class NetmapInterface(ABC): @@ -50,7 +50,7 @@ class NetmapInterface(ABC): ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, - ) -> NodeNetmapInfo: + ) -> NodeInfo: """ Get target node info. """ From 91a2706b06f2bb5d00f0ef60ef5bf1e2c55ece3a Mon Sep 17 00:00:00 2001 From: anurindm Date: Wed, 19 Mar 2025 11:43:21 +0300 Subject: [PATCH 265/274] [#366] Test order depends on location Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py index c56c75a..d7e4cc8 100644 --- a/src/frostfs_testlib/hooks.py +++ b/src/frostfs_testlib/hooks.py @@ -17,7 +17,7 @@ def pytest_add_frostfs_marker(items: list[pytest.Item]): @pytest.hookimpl(trylast=True) def pytest_collection_modifyitems(items: list[pytest.Item]): # The order of running tests corresponded to the suites - items.sort(key=lambda item: item.nodeid) + items.sort(key=lambda item: item.location[0]) # Change order of tests based on @pytest.mark.order() marker def order(item: pytest.Item) -> int: From 8bedd9b3d6d57b493a93888f35177e58eb35fb0d Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 19 Mar 2025 14:33:25 +0300 Subject: [PATCH 266/274] [#367] Use full date during log Signed-off-by: a.berezin --- src/frostfs_testlib/shell/local_shell.py | 2 +- src/frostfs_testlib/shell/ssh_shell.py | 23 +++++++---------------- src/frostfs_testlib/utils/cli_utils.py | 2 +- 3 files changed, 9 insertions(+), 18 deletions(-) diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index 746070f..c0f3b06 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -141,6 +141,6 @@ class LocalShell(Shell): f"RETCODE: {result.return_code}\n\n" f"STDOUT:\n{result.stdout}\n" f"STDERR:\n{result.stderr}\n" - f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" + f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}" ) reporter.attach(command_attachment, "Command execution.txt") diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index e718b4d..3f13dca 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -68,8 +68,7 @@ class SshConnectionProvider: try: if creds.ssh_key_path: logger.info( - f"Trying to connect to host {host} as {creds.ssh_login} using SSH key " - f"{creds.ssh_key_path} (attempt {attempt})" + f"Trying to connect to host {host} as {creds.ssh_login} using SSH key " 
f"{creds.ssh_key_path} (attempt {attempt})" ) connection.connect( hostname=host, @@ -79,9 +78,7 @@ class SshConnectionProvider: timeout=self.CONNECTION_TIMEOUT, ) else: - logger.info( - f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})" - ) + logger.info(f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})") connection.connect( hostname=host, port=port, @@ -104,9 +101,7 @@ class SshConnectionProvider: connection.close() can_retry = attempt + 1 < self.SSH_CONNECTION_ATTEMPTS if can_retry: - logger.warn( - f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}" - ) + logger.warn(f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}") sleep(self.SSH_ATTEMPTS_INTERVAL) continue logger.exception(f"Can't connect to host {host}") @@ -139,7 +134,7 @@ def log_command(func): f"RC:\n {result.return_code}\n" f"STDOUT:\n{textwrap.indent(result.stdout, ' ')}\n" f"STDERR:\n{textwrap.indent(result.stderr, ' ')}\n" - f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" + f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}" ) if not options.no_log: @@ -185,13 +180,11 @@ class SSHShell(Shell): private_key_passphrase: Optional[str] = None, port: str = "22", command_inspectors: Optional[list[CommandInspector]] = None, - custom_environment: Optional[dict] = None + custom_environment: Optional[dict] = None, ) -> None: super().__init__() self.connection_provider = SshConnectionProvider() - self.connection_provider.store_creds( - host, SshCredentials(login, password, private_key_path, private_key_passphrase) - ) + self.connection_provider.store_creds(host, SshCredentials(login, password, private_key_path, private_key_passphrase)) self.host = host self.port = port @@ -220,9 +213,7 @@ class SSHShell(Shell): result = self._exec_non_interactive(command, options) if options.check and result.return_code != 0: - raise RuntimeError( - f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n" - ) + raise RuntimeError(f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n") return result @log_command diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 0f9fef2..8787296 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -68,7 +68,7 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n" f"RC: {return_code}\n" - f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {end_time - start_time}" + f"Start / End / Elapsed\t {start_time} / {end_time} / {end_time - start_time}" ) with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): reporter.attach(command_attachment, "Command execution") From 6bbc359ec9e653f74aa92346d0ee971e944af3cd Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Thu, 20 Mar 2025 09:05:50 +0300 Subject: [PATCH 267/274] [#368] Fixed function check metrics Signed-off-by: Ilyas Niyazov --- src/frostfs_testlib/steps/metrics.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/steps/metrics.py b/src/frostfs_testlib/steps/metrics.py index a9e545a..0d0950a 100644 --- a/src/frostfs_testlib/steps/metrics.py +++ b/src/frostfs_testlib/steps/metrics.py @@ -6,7 +6,7 @@ from 
frostfs_testlib.testing.test_control import wait_for_success @reporter.step("Check metrics result") -@wait_for_success(interval=10) +@wait_for_success(max_wait_time=300, interval=10) def check_metrics_counter( cluster_nodes: list[ClusterNode], operator: str = "==", @@ -19,7 +19,7 @@ def check_metrics_counter( counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps) assert eval( f"{counter_act} {operator} {counter_exp}" - ), f"Expected: {counter_exp} {operator} Actual: {counter_act} in nodes: {cluster_nodes}" + ), f"Actual: {counter_act} {operator} Expected: {counter_exp} in nodes: {cluster_nodes}" @reporter.step("Get metrics value from node: {node}") From c8eec119062001768568d1d0da3e93f7d761dfb8 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Thu, 20 Mar 2025 17:11:45 +0300 Subject: [PATCH 268/274] [#369] Set region in S3 STS client Signed-off-by: Yaroslava Lukoyanova --- src/frostfs_testlib/clients/s3/boto3_client.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index 4157bd6..bceecdf 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -90,6 +90,7 @@ class Boto3ClientWrapper(S3ClientWrapper): aws_access_key_id=self.access_key_id, aws_secret_access_key=self.secret_access_key, endpoint_url=iam_endpoint, + region_name=self.region, verify=False, ) From c4ab14fce8acf26907132f91f0b3566edc853bf7 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 21 Mar 2025 20:03:06 +0300 Subject: [PATCH 269/274] [#370] Unify `delete_object_tagging` method in S3 clients Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/clients/s3/boto3_client.py | 2 +- src/frostfs_testlib/clients/s3/interfaces.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index bceecdf..dd13e6f 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -770,7 +770,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response.get("TagSet") @reporter.step("Delete object tagging") - def delete_object_tagging(self, bucket: str, key: str) -> None: + def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: params = self._convert_to_s3_params(locals()) self._exec_request( self.boto3_client.delete_object_tagging, diff --git a/src/frostfs_testlib/clients/s3/interfaces.py b/src/frostfs_testlib/clients/s3/interfaces.py index d636182..b35d3bf 100644 --- a/src/frostfs_testlib/clients/s3/interfaces.py +++ b/src/frostfs_testlib/clients/s3/interfaces.py @@ -377,7 +377,7 @@ class S3ClientWrapper(HumanReadableABC): """Returns the tag-set of an object.""" @abstractmethod - def delete_object_tagging(self, bucket: str, key: str) -> None: + def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: """Removes the entire tag set from the specified object.""" @abstractmethod From d38808a1f55e370d43e868e7551127dea6506998 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Mon, 3 Feb 2025 12:44:21 +0300 Subject: [PATCH 270/274] [#354] Support of presigned url methods for S3 Signed-off-by: Yaroslava Lukoyanova --- .../clients/s3/aws_cli_client.py | 9 +++++++++ src/frostfs_testlib/clients/s3/boto3_client.py | 18 +++++++++++++++++- src/frostfs_testlib/clients/s3/interfaces.py | 4 ++++ 
From d38808a1f55e370d43e868e7551127dea6506998 Mon Sep 17 00:00:00 2001
From: Yaroslava Lukoyanova
Date: Mon, 3 Feb 2025 12:44:21 +0300
Subject: [PATCH 270/274] [#354] Support of presigned url methods for S3

Signed-off-by: Yaroslava Lukoyanova
---
 .../clients/s3/aws_cli_client.py               |  9 +++++++++
 src/frostfs_testlib/clients/s3/boto3_client.py | 18 +++++++++++++++++-
 src/frostfs_testlib/clients/s3/interfaces.py   |  4 ++++
 src/frostfs_testlib/steps/http_gate.py         |  4 ++++
 4 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py
index a2e3fc7..c1dd6b6 100644
--- a/src/frostfs_testlib/clients/s3/aws_cli_client.py
+++ b/src/frostfs_testlib/clients/s3/aws_cli_client.py
@@ -959,6 +959,15 @@ class AwsCliClient(S3ClientWrapper):

         return json_output

+    @reporter.step("Create presign url for the object")
+    def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str:
+        # AWS CLI does not support method definition and world only in 'get_object' state by default
+        cmd = f"aws {self.common_flags} s3 presign s3://{bucket}/{key} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
+        if expires_in:
+            cmd += f" --expires-in {expires_in}"
+        response = self.local_shell.exec(cmd).stdout
+        return response.strip()
+
     # IAM METHODS #
     # Some methods don't have checks because AWS is silent in some cases (delete, attach, etc.)
diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py
index dd13e6f..0c4e8e4 100644
--- a/src/frostfs_testlib/clients/s3/boto3_client.py
+++ b/src/frostfs_testlib/clients/s3/boto3_client.py
@@ -48,7 +48,13 @@ class Boto3ClientWrapper(S3ClientWrapper):
         self.region = region

         self.session = boto3.Session()
-        self.config = Config(retries={"max_attempts": MAX_REQUEST_ATTEMPTS, "mode": RETRY_MODE})
+        self.config = Config(
+            signature_version="s3v4",
+            retries={
+                "max_attempts": MAX_REQUEST_ATTEMPTS,
+                "mode": RETRY_MODE,
+            },
+        )

         self.set_endpoint(s3gate_endpoint)

@@ -813,6 +819,16 @@ class Boto3ClientWrapper(S3ClientWrapper):
     ) -> dict:
         raise NotImplementedError("Cp is not supported for boto3 client")

+    @reporter.step("Create presign url for the object")
+    def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str:
+        response = self._exec_request(
+            method=self.boto3_client.generate_presigned_url,
+            params={"ClientMethod": method, "Params": {"Bucket": bucket, "Key": key}, "ExpiresIn": expires_in},
+            endpoint=self.s3gate_endpoint,
+            profile=self.profile,
+        )
+        return response
+
     # END OBJECT METHODS #

     # IAM METHODS #
diff --git a/src/frostfs_testlib/clients/s3/interfaces.py b/src/frostfs_testlib/clients/s3/interfaces.py
index b35d3bf..0d03a28 100644
--- a/src/frostfs_testlib/clients/s3/interfaces.py
+++ b/src/frostfs_testlib/clients/s3/interfaces.py
@@ -425,6 +425,10 @@ class S3ClientWrapper(HumanReadableABC):
     ) -> dict:
         """cp directory TODO: Add proper description"""

+    @abstractmethod
+    def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str:
+        """Creates presign URL"""
+
     # END OF OBJECT METHODS #

     # IAM METHODS #
diff --git a/src/frostfs_testlib/steps/http_gate.py b/src/frostfs_testlib/steps/http_gate.py
index 51b0301..aa4abf2 100644
--- a/src/frostfs_testlib/steps/http_gate.py
+++ b/src/frostfs_testlib/steps/http_gate.py
@@ -33,6 +33,7 @@ def get_via_http_gate(
     oid: str,
     node: ClusterNode,
     request_path: Optional[str] = None,
+    presigned_url: Optional[str] = None,
     timeout: Optional[int] = 300,
 ):
     """
@@ -47,6 +48,9 @@
     if request_path:
         request = f"{node.http_gate.get_endpoint()}{request_path}"

+    if presigned_url:
+        request = presigned_url
+
     response = requests.get(request, stream=True, timeout=timeout, verify=False)

     if not response.ok:
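
A sketch of the end-to-end flow these pieces enable; the client and node names
are illustrative, and `method` takes a boto3 client method name such as
"get_object":

    # Generate a presigned GET URL, then fetch the object through the
    # http_gate helper, which substitutes the presigned URL verbatim.
    url = s3_client.create_presign_url("get_object", bucket="test-bucket", key="test-key", expires_in=600)

    get_via_http_gate(cid=cid, oid=oid, node=cluster_node, presigned_url=url)

Because `request = presigned_url` runs after the request is built from
`cid`/`oid`, those arguments no longer affect the actual GET in this mode.
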
From 80226ee0a8c2e309394bc7de13f0dba794e4fad6 Mon Sep 17 00:00:00 2001
From: Kirill Sosnovskikh
Date: Thu, 27 Mar 2025 15:25:24 +0300
Subject: [PATCH 271/274] [#371] Add IAM and STS clients to boto3-stubs

Signed-off-by: Kirill Sosnovskikh
---
 pyproject.toml                                 | 2 +-
 requirements.txt                               | 4 ++--
 src/frostfs_testlib/clients/s3/boto3_client.py | 6 ++++--
 3 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 2778f8a..d62f04b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -28,7 +28,7 @@ dependencies = [
     "pytest==7.1.2",
     "tenacity==8.0.1",
     "boto3==1.35.30",
-    "boto3-stubs[essential]==1.35.30",
+    "boto3-stubs[s3,iam,sts]==1.35.30",
 ]
 requires-python = ">=3.10"

diff --git a/requirements.txt b/requirements.txt
index a0bcc11..56d9b83 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,7 +9,7 @@ testrail-api==1.12.0
 tenacity==8.0.1
 pytest==7.1.2
 boto3==1.35.30
-boto3-stubs[essential]==1.35.30
+boto3-stubs[s3,iam,sts]==1.35.30
 pydantic==2.10.6

 # Dev dependencies
@@ -22,4 +22,4 @@ pylint==2.17.4
 # Packaging dependencies
 build==0.8.0
 setuptools==65.3.0
-twine==4.0.1
+twine==4.0.1
\ No newline at end of file
diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py
index 0c4e8e4..ac4d55b 100644
--- a/src/frostfs_testlib/clients/s3/boto3_client.py
+++ b/src/frostfs_testlib/clients/s3/boto3_client.py
@@ -10,7 +10,9 @@ import boto3
 import urllib3
 from botocore.config import Config
 from botocore.exceptions import ClientError
+from mypy_boto3_iam import IAMClient
 from mypy_boto3_s3 import S3Client
+from mypy_boto3_sts import STSClient

 from frostfs_testlib import reporter
 from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
@@ -39,8 +41,8 @@ class Boto3ClientWrapper(S3ClientWrapper):
         self.boto3_client: S3Client = None

         self.iam_endpoint: str = ""
-        self.boto3_iam_client: S3Client = None
-        self.boto3_sts_client: S3Client = None
+        self.boto3_iam_client: IAMClient = None
+        self.boto3_sts_client: STSClient = None

         self.access_key_id = access_key_id
         self.secret_access_key = secret_access_key

From aab4d4f657590dcb1be0231b477862192d51c33c Mon Sep 17 00:00:00 2001
From: Dmitriy Zayakin
Date: Tue, 15 Apr 2025 12:26:35 +0300
Subject: [PATCH 272/274] [#373] Add step to httpClient for log write

Signed-off-by: Dmitriy Zayakin
---
 src/frostfs_testlib/clients/http/http_client.py | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/src/frostfs_testlib/clients/http/http_client.py b/src/frostfs_testlib/clients/http/http_client.py
index aebd5ef..16d7707 100644
--- a/src/frostfs_testlib/clients/http/http_client.py
+++ b/src/frostfs_testlib/clients/http/http_client.py
@@ -15,14 +15,14 @@ LOGGING_CONFIG = {
     "handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}},
     "formatters": {
         "http": {
-            "format": "%(levelname)s [%(asctime)s] %(name)s - %(message)s",
+            "format": "%(asctime)s [%(levelname)s] %(name)s - %(message)s",
             "datefmt": "%Y-%m-%d %H:%M:%S",
         }
     },
     "loggers": {
         "httpx": {
             "handlers": ["default"],
-            "level": "DEBUG",
+            "level": "ERROR",
         },
         "httpcore": {
             "handlers": ["default"],
@@ -43,7 +43,7 @@ class HttpClient:
             response = client.request(method, url, **kwargs)

         self._attach_response(response, **kwargs)
-        logger.info(f"Response: {response.status_code} => {response.text}")
+        # logger.info(f"Response: {response.status_code} => {response.text}")

         if expected_status_code:
             assert (
@@ -131,6 +131,7 @@

         reporter.attach(report, "Requests Info")
         reporter.attach(curl_request, "CURL")
+        cls._write_log(curl_request, response_body, response.status_code)

     @classmethod
     def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str:
@@ -143,3 +144,9 @@

         # Option -k means no verify SSL
         return f"curl {url} -X {method} {headers}{data} -k"
+
+    @classmethod
+    def _write_log(cls, curl: str, res_body: str, res_code: int) -> None:
+        if res_body:
+            curl += f"\nResponse: {res_code}\n{res_body}"
+        logger.info(f"{curl}")
Info") reporter.attach(curl_request, "CURL") + cls._write_log(curl_request, response_body, response.status_code) @classmethod def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str: @@ -143,3 +144,9 @@ class HttpClient: # Option -k means no verify SSL return f"curl {url} -X {method} {headers}{data} -k" + + @classmethod + def _write_log(cls, curl: str, res_body: str, res_code: int) -> None: + if res_body: + curl += f"\nResponse: {res_code}\n{res_body}" + logger.info(f"{curl}") From 9ad620121e3871f9eab4e5afd3495197541a90a9 Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Wed, 9 Apr 2025 16:15:46 +0300 Subject: [PATCH 273/274] [#372] Added decorator wait until stabilization metric values Signed-off-by: Ilyas Niyazov --- .../storage/dataclasses/metrics.py | 48 ++++++++++++++++++- 1 file changed, 46 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/storage/dataclasses/metrics.py b/src/frostfs_testlib/storage/dataclasses/metrics.py index 81e757c..8969015 100644 --- a/src/frostfs_testlib/storage/dataclasses/metrics.py +++ b/src/frostfs_testlib/storage/dataclasses/metrics.py @@ -1,3 +1,9 @@ +import time +from functools import wraps +from typing import Callable + +import pytest + from frostfs_testlib.hosting import Host from frostfs_testlib.shell.interfaces import CommandResult @@ -7,11 +13,11 @@ class Metrics: self.storage = StorageMetrics(host, metrics_endpoint) - class StorageMetrics: """ Class represents storage metrics in a cluster """ + def __init__(self, host: Host, metrics_endpoint: str) -> None: self.host = host self.metrics_endpoint = metrics_endpoint @@ -29,8 +35,46 @@ class StorageMetrics: additional_greps = " |grep ".join([grep_command for grep_command in greps.values()]) result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}") return result - + def get_all_metrics(self) -> CommandResult: shell = self.host.get_shell() result = shell.exec(f"curl -s {self.metrics_endpoint}") return result + + +def wait_until_metric_result_is_stable( + relative_deviation: float = None, absolute_deviation: int = None, max_attempts: int = 10, sleep_interval: int = 30 +): + """ + A decorator function that repeatedly calls the decorated function until its result stabilizes + within a specified relative tolerance or until the maximum number of attempts is reached. + + This decorator is useful for scenarios where a function returns a metric or value that may fluctuate + over time, and you want to ensure that the result has stabilized before proceeding. 
+ """ + + def decorator(func: Callable): + @wraps(func) + def wrapper(*args, **kwargs): + last_result = None + for _ in range(max_attempts): + # first function call + first_result = func(*args, **kwargs) + + # waiting before the second call + time.sleep(sleep_interval) + + # second function call + last_result = func(*args, **kwargs) + + # checking value stability + if first_result == pytest.approx(last_result, rel=relative_deviation, abs=absolute_deviation): + return last_result + + # if stability is not achieved, return the last value + if last_result is not None: + return last_result + + return wrapper + + return decorator From 517a7b932261a142b2a86b2687843d8fc9651ce0 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Mon, 28 Apr 2025 18:43:44 +0300 Subject: [PATCH 274/274] [#377] Update text for "subject not found" error Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/resources/error_patterns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 6c0cb14..15e2977 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -1,6 +1,6 @@ # Regex patterns of status codes of Container service CONTAINER_NOT_FOUND = "code = 3072.*message = container not found" -SUBJECT_NOT_FOUND = "code = 1024.*message = frostfs error: chain/client.*subject not found.*" +SUBJECT_NOT_FOUND = "code = 1024.*message =.*chain/client.*subject not found.*" # Regex patterns of status codes of Object service MALFORMED_REQUEST = "code = 1024.*message = malformed request"