From 85c2707ec807a4220504a40bcf1655e2aefe4869 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Wed, 28 Aug 2024 12:12:05 +0300 Subject: [PATCH 01/81] [#284] Add container operational in CliWrapper Signed-off-by: Dmitriy Zayakin --- .../cli/frostfs_cli/container.py | 20 ++ .../implementations/container.py | 247 ++++++++++++++++-- .../storage/grpc_operations/interfaces.py | 148 +++++++++-- 3 files changed, 377 insertions(+), 38 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py index 1ff217f..8bcbe9e 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/container.py +++ b/src/frostfs_testlib/cli/frostfs_cli/container.py @@ -16,6 +16,8 @@ class FrostfsCliContainer(CliCommand): basic_acl: Optional[str] = None, await_mode: bool = False, disable_timestamp: bool = False, + force: bool = False, + trace: bool = False, name: Optional[str] = None, nonce: Optional[str] = None, policy: Optional[str] = None, @@ -37,6 +39,8 @@ class FrostfsCliContainer(CliCommand): basic_acl: Hex encoded basic ACL value or keywords like 'public-read-write', 'private', 'eacl-public-read' (default "private"). disable_timestamp: Disable timestamp container attribute. + force: Skip placement validity check. + trace: Generate trace ID and print it. name: Container name attribute. nonce: UUIDv4 nonce value for container. policy: QL-encoded or JSON-encoded placement policy or path to file with it. @@ -69,6 +73,7 @@ class FrostfsCliContainer(CliCommand): ttl: Optional[int] = None, xhdr: Optional[dict] = None, force: bool = False, + trace: bool = False, ) -> CommandResult: """ Delete an existing container. @@ -78,6 +83,7 @@ class FrostfsCliContainer(CliCommand): address: Address of wallet account. await_mode: Block execution until container is removed. cid: Container ID. + trace: Generate trace ID and print it. force: Do not check whether container contains locks and remove immediately. 
rpc_endpoint: Remote node address (as 'multiaddr' or ':'). session: Path to a JSON-encoded container session token. @@ -104,6 +110,7 @@ class FrostfsCliContainer(CliCommand): await_mode: bool = False, to: Optional[str] = None, json_mode: bool = False, + trace: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, @@ -116,6 +123,7 @@ class FrostfsCliContainer(CliCommand): await_mode: Block execution until container is removed. cid: Container ID. json_mode: Print or dump container in JSON format. + trace: Generate trace ID and print it. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). to: Path to dump encoded container. ttl: TTL value in request meta header (default 2). @@ -155,6 +163,8 @@ class FrostfsCliContainer(CliCommand): cid: Container ID. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). to: Path to dump encoded container. + json_mode: Print or dump container in JSON format. + trace: Generate trace ID and print it. session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. @@ -174,6 +184,7 @@ class FrostfsCliContainer(CliCommand): def list( self, rpc_endpoint: str, + name: Optional[str] = None, wallet: Optional[str] = None, address: Optional[str] = None, generate_key: Optional[bool] = None, @@ -188,11 +199,13 @@ class FrostfsCliContainer(CliCommand): Args: address: Address of wallet account. + name: List containers by the attribute name. owner: Owner of containers (omit to use owner from private key). rpc_endpoint: Remote node address (as 'multiaddr' or ':'). ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + trace: Generate trace ID and print it. timeout: Timeout for the operation (default 15s). generate_key: Generate a new private key. 
@@ -208,9 +221,11 @@ class FrostfsCliContainer(CliCommand): self, rpc_endpoint: str, cid: str, + bearer: Optional[str] = None, wallet: Optional[str] = None, address: Optional[str] = None, generate_key: Optional[bool] = None, + trace: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, @@ -221,10 +236,12 @@ class FrostfsCliContainer(CliCommand): Args: address: Address of wallet account. cid: Container ID. + bearer: File with signed JSON or binary encoded bearer token. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + trace: Generate trace ID and print it. timeout: Timeout for the operation (default 15s). generate_key: Generate a new private key. @@ -236,6 +253,7 @@ class FrostfsCliContainer(CliCommand): **{param: value for param, value in locals().items() if param not in ["self"]}, ) + # TODO Deprecated method with 0.42 def set_eacl( self, rpc_endpoint: str, @@ -281,6 +299,7 @@ class FrostfsCliContainer(CliCommand): address: Optional[str] = None, ttl: Optional[int] = None, from_file: Optional[str] = None, + trace: bool = False, short: Optional[bool] = True, xhdr: Optional[dict] = None, generate_key: Optional[bool] = None, @@ -298,6 +317,7 @@ class FrostfsCliContainer(CliCommand): from_file: string File path with encoded container timeout: duration Timeout for the operation (default 15 s) short: shorten the output of node information. + trace: Generate trace ID and print it. xhdr: Dict with request X-Headers. generate_key: Generate a new private key. 
diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py index cac2df4..c8360ea 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py @@ -1,11 +1,16 @@ +import json import logging -from typing import Optional +import re +from typing import List, Optional, Union from frostfs_testlib import reporter from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.plugins import load_plugin from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.storage.constants import PlacementRule +from frostfs_testlib.s3.interfaces import BucketContainerResolver +from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.grpc_operations import interfaces +from frostfs_testlib.utils import json_utils logger = logging.getLogger("NeoLogger") @@ -18,13 +23,22 @@ class ContainerOperations(interfaces.ContainerInterface): def create( self, endpoint: str, - rule: str = PlacementRule.DEFAULT_PLACEMENT_RULE, - basic_acl: str = "", + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, + address: Optional[str] = None, attributes: Optional[dict] = None, - session_token: str = "", + basic_acl: Optional[str] = None, + await_mode: bool = False, + disable_timestamp: bool = False, + force: bool = False, + trace: bool = False, name: Optional[str] = None, - options: Optional[dict] = None, - await_mode: bool = True, + nonce: Optional[str] = None, + policy: Optional[str] = None, + session: Optional[str] = None, + subnet: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> str: """ @@ -54,14 +68,23 @@ class ContainerOperations(interfaces.ContainerInterface): """ result = self.cli.container.create( rpc_endpoint=endpoint, - 
policy=rule, - basic_acl=basic_acl, + policy=policy, + nns_zone=nns_zone, + nns_name=nns_name, + address=address, attributes=attributes, - name=name, - session=session_token, + basic_acl=basic_acl, await_mode=await_mode, + disable_timestamp=disable_timestamp, + force=force, + trace=trace, + name=name, + nonce=nonce, + session=session, + subnet=subnet, + ttl=ttl, + xhdr=xhdr, timeout=timeout, - **options or {}, ) cid = self._parse_cid(result.stdout) @@ -71,21 +94,215 @@ class ContainerOperations(interfaces.ContainerInterface): return cid @reporter.step("List Containers") - def list(self, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT) -> list[str]: + def list( + self, + endpoint: str, + name: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + owner: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + **params, + ) -> List[str]: """ A wrapper for `frostfs-cli container list` call. It returns all the available containers for the given wallet. Args: - wallet (WalletInfo): a wallet on whose behalf we list the containers shell: executor for cli command endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key timeout: Timeout for the operation. 
Returns: (list): list of containers """ - result = self.cli.container.list(rpc_endpoint=endpoint, timeout=timeout) + result = self.cli.container.list( + rpc_endpoint=endpoint, + name=name, + address=address, + generate_key=generate_key, + owner=owner, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + **params, + ) return result.stdout.split() + @reporter.step("List Objects in container") + def list_objects( + self, + endpoint: str, + cid: str, + bearer: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[str]: + """ + A wrapper for `frostfs-cli container list-objects` call. It returns all the + available objects in container. + Args: + container_id: cid of container + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + timeout: Timeout for the operation. 
+ Returns: + (list): list of containers + """ + result = self.cli.container.list_objects( + rpc_endpoint=endpoint, + cid=cid, + bearer=bearer, + wallet=wallet, + address=address, + generate_key=generate_key, + trace=trace, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + logger.info(f"Container objects: \n{result}") + return result.stdout.split() + + @reporter.step("Delete container") + def delete( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + await_mode: bool = False, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + force: bool = False, + trace: bool = False, + ): + try: + return self.cli.container.delete( + rpc_endpoint=endpoint, + cid=cid, + address=address, + await_mode=await_mode, + session=session, + ttl=ttl, + xhdr=xhdr, + force=force, + trace=trace, + ).stdout + except RuntimeError as e: + print(f"Error request:\n{e}") + + @reporter.step("Get container") + def get( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + to: Optional[str] = None, + json_mode: bool = True, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> Union[dict, str]: + result = self.cli.container.get( + rpc_endpoint=endpoint, + cid=cid, + address=address, + generate_key=generate_key, + await_mode=await_mode, + to=to, + json_mode=json_mode, + trace=trace, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + container_info = json.loads(result.stdout) + attributes = dict() + for attr in container_info["attributes"]: + attributes[attr["key"]] = attr["value"] + container_info["attributes"] = attributes + container_info["ownerID"] = json_utils.json_reencode(container_info["ownerID"]["value"]) + return container_info + + @reporter.step("Get eacl container") + def get_eacl( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: 
Optional[bool] = None, + await_mode: bool = False, + json_mode: bool = True, + trace: bool = False, + to: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ): + return self.cli.container.get_eacl( + rpc_endpoint=endpoint, + cid=cid, + address=address, + generate_key=generate_key, + await_mode=await_mode, + to=to, + session=session, + ttl=ttl, + xhdr=xhdr, + timeout=CLI_DEFAULT_TIMEOUT, + ).stdout + + @reporter.step("Get nodes container") + def nodes( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + ttl: Optional[int] = None, + from_file: Optional[str] = None, + trace: bool = False, + short: Optional[bool] = True, + xhdr: Optional[dict] = None, + generate_key: Optional[bool] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[ClusterNode]: + result = self.cli.container.search_node( + rpc_endpoint=endpoint, + cid=cid, + address=address, + ttl=ttl, + from_file=from_file, + trace=trace, + short=short, + xhdr=xhdr, + generate_key=generate_key, + timeout=timeout, + ).stdout + + pattern = r"[0-9]+(?:\.[0-9]+){3}" + nodes_ip = list(set(re.findall(pattern, result))) + + with reporter.step(f"nodes ips = {nodes_ip}"): + nodes_list = cluster.get_nodes_by_ip(nodes_ip) + + with reporter.step(f"Return nodes - {nodes_list}"): + return nodes_list + + @reporter.step("Resolve container by name") + def resolve_container_by_name(name: str, node: ClusterNode): + resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", node.host.config.product) + resolver: BucketContainerResolver = resolver_cls() + return resolver.resolve(node, name) + def _parse_cid(self, output: str) -> str: """ Parses container ID from a given CLI output. 
The input string we expect: diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces.py b/src/frostfs_testlib/storage/grpc_operations/interfaces.py index c39accc..1947435 100644 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces.py +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces.py @@ -1,7 +1,6 @@ from abc import ABC, abstractmethod -from typing import Any, Optional +from typing import Any, List, Optional -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.shell.interfaces import CommandResult from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.constants import PlacementRule @@ -96,7 +95,7 @@ class ObjectInterface(ABC): bearer: str = "", xhdr: Optional[dict] = None, session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> str: pass @@ -111,7 +110,7 @@ class ObjectInterface(ABC): xhdr: Optional[dict] = None, no_progress: bool = True, session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> file_utils.TestFile: pass @@ -126,14 +125,14 @@ class ObjectInterface(ABC): xhdr: Optional[dict] = None, no_progress: bool = True, session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> str: pass @abstractmethod def hash( self, - rpc_endpoint: str, + endpoint: str, cid: str, oid: str, address: Optional[str] = None, @@ -145,7 +144,7 @@ class ObjectInterface(ABC): session: Optional[str] = None, hash_type: Optional[str] = None, xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> str: pass @@ -161,7 +160,7 @@ class ObjectInterface(ABC): is_raw: bool = False, is_direct: bool = False, session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> CommandResult | Any: pass @@ -178,7 
+177,7 @@ class ObjectInterface(ABC): session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> str: pass @@ -195,7 +194,7 @@ class ObjectInterface(ABC): expire_at: Optional[int] = None, no_progress: bool = True, session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> str: pass @@ -212,7 +211,7 @@ class ObjectInterface(ABC): expire_at: Optional[int] = None, no_progress: bool = True, session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> str: pass @@ -226,7 +225,7 @@ class ObjectInterface(ABC): bearer: str = "", xhdr: Optional[dict] = None, session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> tuple[file_utils.TestFile, bytes]: pass @@ -242,8 +241,8 @@ class ObjectInterface(ABC): session: Optional[str] = None, phy: bool = False, root: bool = False, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> list: + timeout: Optional[str] = None, + ) -> List: pass @abstractmethod @@ -257,8 +256,8 @@ class ObjectInterface(ABC): xhdr: Optional[dict] = None, is_direct: bool = False, verify_presence_all: bool = False, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> list[ClusterNode]: + timeout: Optional[str] = None, + ) -> List[ClusterNode]: pass @@ -267,16 +266,119 @@ class ContainerInterface(ABC): def create( self, endpoint: str, - rule: str = PlacementRule.DEFAULT_PLACEMENT_RULE, - basic_acl: str = "", + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, + address: Optional[str] = None, attributes: Optional[dict] = None, - session_token: str = "", + basic_acl: Optional[str] = None, + await_mode: bool = False, + disable_timestamp: bool = False, + force: bool = False, + trace: bool = False, name: Optional[str] = None, - options: Optional[dict] = None, - 
await_mode: bool = True, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + nonce: Optional[str] = None, + policy: Optional[str] = None, + session: Optional[str] = None, + subnet: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> str: - pass + """ + Create a new container and register it in the FrostFS. + It will be stored in the sidechain when the Inner Ring accepts it. + """ + raise NotImplementedError("No implemethed method create") + + @abstractmethod + def delete( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + await_mode: bool = False, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + force: bool = False, + trace: bool = False, + ) -> List[str]: + """ + Delete an existing container. + Only the owner of the container has permission to remove the container. + """ + raise NotImplementedError("No implemethed method delete") + + @abstractmethod + def get( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + to: Optional[str] = None, + json_mode: bool = True, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get container field info.""" + raise NotImplementedError("No implemethed method get") + + @abstractmethod + def get_eacl( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + json_mode: bool = True, + trace: bool = False, + to: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get extended ACL table of container.""" + raise NotImplementedError("No implemethed method get-eacl") + + @abstractmethod + def list( + self, + endpoint: str, + name: 
Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + owner: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + **params, + ) -> List[str]: + """List all created containers.""" + raise NotImplementedError("No implemethed method list") + + @abstractmethod + def nodes( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + ttl: Optional[int] = None, + from_file: Optional[str] = None, + trace: bool = False, + short: Optional[bool] = True, + xhdr: Optional[dict] = None, + generate_key: Optional[bool] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Show the nodes participating in the container in the current epoch.""" + raise NotImplementedError("No implemethed method nodes") class GrpcClientWrapper(ABC): From eba782e7d26945d75bb1e233b16058e3b1b52f7d Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 2 Sep 2024 13:30:01 +0300 Subject: [PATCH 02/81] [#285] Change func search bucket nodes and remove old resolver bucket cnr Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/cli/container.py | 7 ------- src/frostfs_testlib/steps/s3/s3_helper.py | 6 ++++-- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index 641b321..809b39a 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -327,13 +327,6 @@ def _parse_cid(output: str) -> str: return splitted[1] -@reporter.step("Search container by name") -def search_container_by_name(name: str, node: ClusterNode): - resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", node.host.config.product) - resolver: BucketContainerResolver = resolver_cls() - return resolver.resolve(node, name) - - @reporter.step("Search for nodes with a container") def search_nodes_with_container( wallet: WalletInfo, 
diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index 9b85766..dbf48d3 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -7,8 +7,9 @@ from dateutil.parser import parse from frostfs_testlib import reporter from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus +from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.shell import Shell -from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container +from frostfs_testlib.steps.cli.container import search_nodes_with_container from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo @@ -175,10 +176,11 @@ def search_nodes_with_bucket( wallet: WalletInfo, shell: Shell, endpoint: str, + bucket_container_resolver: BucketContainerResolver, ) -> list[ClusterNode]: cid = None for cluster_node in cluster.cluster_nodes: - cid = search_container_by_name(name=bucket_name, node=cluster_node) + cid = bucket_container_resolver.resolve(cluster_node, bucket_name) if cid: break nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) From d2f8323fb95c547ae35b984744b1ef63ce502dba Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 3 Sep 2024 15:11:43 +0300 Subject: [PATCH 03/81] [#286] Change args id in shards.set-mode command Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/frostfs_cli/shards.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py index e88707a..82ea87b 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -40,7 +40,7 @@ class FrostfsCliShards(CliCommand): self, endpoint: str, mode: str, - id: Optional[list[str]], + id: 
Optional[list[str]] = None, wallet: Optional[str] = None, wallet_password: Optional[str] = None, address: Optional[str] = None, From 84e83487f9896cc1e95c64680bf7664724a4c59c Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 10 Sep 2024 13:54:51 +0300 Subject: [PATCH 04/81] [#288] Update object and chunks Clients Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/frostfs_cli/object.py | 2 +- .../grpc_operations/implementations/chunks.py | 63 +++++++++++++++---- 2 files changed, 52 insertions(+), 13 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 070def0..1857987 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -370,11 +370,11 @@ class FrostfsCliObject(CliCommand): self, rpc_endpoint: str, cid: str, + oid: Optional[str] = None, wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, generate_key: Optional[bool] = None, - oid: Optional[str] = None, trace: bool = False, root: bool = False, verify_presence_all: bool = False, diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py index b0f196e..d1bba9f 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -60,7 +60,6 @@ class ChunksOperations(interfaces.ChunksInterface): rpc_endpoint: str, cid: str, oid: str, - wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, generate_key: Optional[bool] = None, @@ -72,15 +71,28 @@ class ChunksOperations(interfaces.ChunksInterface): xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> list[Chunk]: - object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]}) - return 
self._parse_object_nodes(object_nodes.stdout) + object_nodes = self.cli.object.nodes( + rpc_endpoint=rpc_endpoint, + cid=cid, + address=address, + bearer=bearer, + generate_key=generate_key, + oid=oid, + trace=trace, + root=root, + verify_presence_all=verify_presence_all, + json=json, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0] @reporter.step("Get last parity chunk") def get_parity( self, rpc_endpoint: str, cid: str, - wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, generate_key: Optional[bool] = None, @@ -93,29 +105,56 @@ class ChunksOperations(interfaces.ChunksInterface): xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> Chunk: - object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]}) - return self._parse_object_nodes(object_nodes.stdout)[-1] + object_nodes = self.cli.object.nodes( + rpc_endpoint=rpc_endpoint, + cid=cid, + address=address, + bearer=bearer, + generate_key=generate_key, + oid=oid, + trace=trace, + root=root, + verify_presence_all=verify_presence_all, + json=json, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0] @reporter.step("Get first data chunk") def get_first_data( self, rpc_endpoint: str, cid: str, - wallet: Optional[str] = None, + oid: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = False, + trace: bool = True, root: bool = False, verify_presence_all: bool = False, json: bool = True, ttl: Optional[int] = None, xhdr: Optional[dict] = None, - timeout: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> Chunk: - object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]}) - return 
self._parse_object_nodes(object_nodes.stdout)[0] + object_nodes = self.cli.object.nodes( + rpc_endpoint=rpc_endpoint, + cid=cid, + address=address, + bearer=bearer, + generate_key=generate_key, + oid=oid, + trace=trace, + root=root, + verify_presence_all=verify_presence_all, + json=json, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0] def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]: parse_result = json.loads(object_nodes) From 565fd4c72b6ab562f3024d471ff0aad5f2f42514 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Tue, 10 Sep 2024 15:14:32 +0300 Subject: [PATCH 05/81] [#289] Move temp dir fixture to testlib Signed-off-by: a.berezin --- src/frostfs_testlib/__init__.py | 2 +- src/frostfs_testlib/fixtures.py | 12 +++++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index 2cdaf4e..f3143e6 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1,3 +1,3 @@ __version__ = "2.0.1" -from .fixtures import configure_testlib, hosting +from .fixtures import configure_testlib, hosting, temp_directory diff --git a/src/frostfs_testlib/fixtures.py b/src/frostfs_testlib/fixtures.py index 8f6873f..d0f92f2 100644 --- a/src/frostfs_testlib/fixtures.py +++ b/src/frostfs_testlib/fixtures.py @@ -7,7 +7,7 @@ import yaml from frostfs_testlib import reporter from frostfs_testlib.hosting.hosting import Hosting -from frostfs_testlib.resources.common import HOSTING_CONFIG_FILE +from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE from frostfs_testlib.storage import get_service_registry @@ -24,6 +24,16 @@ def configure_testlib(): registry.register_service(svc.name, svc.load()) +@pytest.fixture(scope="session") +def temp_directory(configure_testlib): + with reporter.step("Prepare tmp directory"): + full_path = ASSETS_DIR + if not os.path.exists(full_path): + 
os.mkdir(full_path) + + return full_path + + @pytest.fixture(scope="session") def hosting(configure_testlib) -> Hosting: with open(HOSTING_CONFIG_FILE, "r") as file: From 36bfe385d59f9ddb69593d1095e8d15c0d1c4e0d Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Mon, 9 Sep 2024 20:44:31 +0300 Subject: [PATCH 06/81] Added method get s3 endpoint for namespace --- src/frostfs_testlib/storage/constants.py | 1 + src/frostfs_testlib/storage/dataclasses/frostfs_services.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 84f8d24..2cffd3a 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -12,6 +12,7 @@ class ConfigAttributes: REMOTE_WALLET_CONFIG = "remote_wallet_config_path" ENDPOINT_DATA_0 = "endpoint_data0" ENDPOINT_DATA_1 = "endpoint_data1" + ENDPOINT_DATA_0_NS = "endpoint_data0_namespace" ENDPOINT_INTERNAL = "endpoint_internal0" ENDPOINT_PROMETHEUS = "endpoint_prometheus" CONTROL_ENDPOINT = "control_endpoint" diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 16efd72..1420356 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -45,6 +45,9 @@ class S3Gate(NodeBase): self._get_attribute(ConfigAttributes.ENDPOINT_DATA_1), ] + def get_ns_endpoint(self, ns_name: str) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0_NS).format(namespace=ns_name) + def service_healthcheck(self) -> bool: health_metric = "frostfs_s3_gw_state_health" output = self.host.get_shell().exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d").stdout From 4a2ac8a9b6ed8fe37c25bff91422f2d4232d2ab3 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Wed, 11 Sep 2024 10:42:51 +0300 Subject: [PATCH 07/81] [#290] Update restore traffic method 
Signed-off-by: Dmitriy Zayakin --- .../storage/controllers/cluster_state_controller.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 5d87a60..7f93e40 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -325,6 +325,8 @@ class ClusterStateController: node: ClusterNode, ) -> None: IpHelper.restore_input_traffic_to_node(node=node) + index = self.dropped_traffic.index(node) + self.dropped_traffic.pop(index) @reporter.step("Restore blocked nodes") def restore_all_traffic(self): @@ -531,7 +533,7 @@ class ClusterStateController: except Exception as err: logger.warning(f"Host ping fails with error {err}") return HostStatus.ONLINE - + @reporter.step("Get contract by domain - {domain_name}") def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str): frostfs_adm = FrostfsAdm( From 1bee69042b1982f5167bfbef9e7b01a768452688 Mon Sep 17 00:00:00 2001 From: "m.malygina" Date: Tue, 10 Sep 2024 10:45:22 +0300 Subject: [PATCH 08/81] [#294] add wipe data using wipefs method Signed-off-by: m.malygina --- src/frostfs_testlib/hosting/docker_host.py | 8 +++++++- src/frostfs_testlib/hosting/interfaces.py | 19 +++++++++++++++++-- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 0fb5af0..5110e63 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -185,6 +185,12 @@ class DockerHost(Host): def is_file_exist(self, file_path: str) -> None: raise NotImplementedError("Not implemented for docker") + def wipefs_storage_node_data(self, service_name: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def finish_wipefs(self, 
service_name: str) -> None: + raise NotImplementedError("Not implemented for docker") + def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: volume_path = self.get_data_directory(service_name) @@ -240,7 +246,7 @@ class DockerHost(Host): until: Optional[datetime] = None, unit: Optional[str] = None, exclude_filter: Optional[str] = None, - priority: Optional[str] = None + priority: Optional[str] = None, ) -> str: client = self._get_docker_client() filtered_logs = "" diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 36c2804..b84326a 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -178,6 +178,21 @@ class Host(ABC): cache_only: To delete cache only. """ + @abstractmethod + def wipefs_storage_node_data(self, service_name: str) -> None: + """Erases all data of the storage node with specified name. + + Args: + service_name: Name of storage node service. + """ + + def finish_wipefs(self, service_name: str) -> None: + """Erases all data of the storage node with specified name. + + Args: + service_name: Name of storage node service. + """ + @abstractmethod def delete_fstree(self, service_name: str) -> None: """ @@ -297,7 +312,7 @@ class Host(ABC): until: Optional[datetime] = None, unit: Optional[str] = None, exclude_filter: Optional[str] = None, - priority: Optional[str] = None + priority: Optional[str] = None, ) -> str: """Get logs from host filtered by regex. @@ -306,7 +321,7 @@ class Host(ABC): since: If set, limits the time from which logs should be collected. Must be in UTC. until: If set, limits the time until which logs should be collected. Must be in UTC. unit: required unit. - priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher. + priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher. 
For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0. Returns: From 0d750ed114653c05f810d35b0ab05d1104af40c2 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 17 Sep 2024 07:52:32 +0300 Subject: [PATCH 09/81] [#293] Add in CSC methods change blockchain netmap and update CliWrapper Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/node_management.py | 40 +++++---------- .../controllers/cluster_state_controller.py | 49 ++++++++++--------- .../dataclasses/storage_object_info.py | 3 ++ .../grpc_operations/implementations/chunks.py | 10 ++-- .../implementations/container.py | 3 +- .../grpc_operations/implementations/object.py | 8 +++ .../storage/grpc_operations/interfaces.py | 7 ++- 7 files changed, 63 insertions(+), 57 deletions(-) diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py index ece674b..42b1fc5 100644 --- a/src/frostfs_testlib/steps/node_management.py +++ b/src/frostfs_testlib/steps/node_management.py @@ -13,6 +13,7 @@ from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align from frostfs_testlib.storage.cluster import Cluster, StorageNode +from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils import datetime_utils logger = logging.getLogger("NeoLogger") @@ -111,10 +112,7 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: storage_wallet_path = node.get_wallet_path() cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config) - return cli.netmap.snapshot( - rpc_endpoint=node.get_rpc_endpoint(), - wallet=storage_wallet_path, - ).stdout + return cli.netmap.snapshot(rpc_endpoint=node.get_rpc_endpoint(), wallet=storage_wallet_path).stdout @reporter.step("Get shard list for {node}") @@ -202,12 +200,7 @@ def delete_node_data(node: StorageNode) -> None: 
@reporter.step("Exclude node {node_to_exclude} from network map") -def exclude_node_from_network_map( - node_to_exclude: StorageNode, - alive_node: StorageNode, - shell: Shell, - cluster: Cluster, -) -> None: +def exclude_node_from_network_map(node_to_exclude: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None: node_netmap_key = node_to_exclude.get_wallet_public_key() storage_node_set_status(node_to_exclude, status="offline") @@ -221,12 +214,7 @@ def exclude_node_from_network_map( @reporter.step("Include node {node_to_include} into network map") -def include_node_to_network_map( - node_to_include: StorageNode, - alive_node: StorageNode, - shell: Shell, - cluster: Cluster, -) -> None: +def include_node_to_network_map(node_to_include: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None: storage_node_set_status(node_to_include, status="online") # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch. 
@@ -236,7 +224,7 @@ def include_node_to_network_map( tick_epoch(shell, cluster) time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2) - check_node_in_map(node_to_include, shell, alive_node) + await_node_in_map(node_to_include, shell, alive_node) @reporter.step("Check node {node} in network map") @@ -250,6 +238,11 @@ def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[Stor assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map" +@wait_for_success(300, 15, title="Await node {node} in network map") +def await_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: + check_node_in_map(node, shell, alive_node) + + @reporter.step("Check node {node} NOT in network map") def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: alive_node = alive_node or node @@ -276,12 +269,7 @@ def wait_for_node_to_be_ready(node: StorageNode) -> None: @reporter.step("Remove nodes from network map trough cli-adm morph command") -def remove_nodes_from_map_morph( - shell: Shell, - cluster: Cluster, - remove_nodes: list[StorageNode], - alive_node: Optional[StorageNode] = None, -): +def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: list[StorageNode], alive_node: Optional[StorageNode] = None): """ Move node to the Offline state in the candidates list and tick an epoch to update the netmap using frostfs-adm @@ -300,9 +288,5 @@ def remove_nodes_from_map_morph( if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH: # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests) - frostfsadm = FrostfsAdm( - shell=remote_shell, - frostfs_adm_exec_path=FROSTFS_ADM_EXEC, - config_file=FROSTFS_ADM_CONFIG_PATH, - ) + frostfsadm = FrostfsAdm(shell=remote_shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) frostfsadm.morph.remove_nodes(node_netmap_keys) diff 
--git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 7f93e40..53098b1 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -14,6 +14,7 @@ from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_E from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider from frostfs_testlib.steps.network import IpHelper +from frostfs_testlib.steps.node_management import include_node_to_network_map, remove_nodes_from_map_morph from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass @@ -39,6 +40,7 @@ class ClusterStateController: self.stopped_nodes: list[ClusterNode] = [] self.detached_disks: dict[str, DiskController] = {} self.dropped_traffic: list[ClusterNode] = [] + self.excluded_from_netmap: list[StorageNode] = [] self.stopped_services: set[NodeBase] = set() self.cluster = cluster self.healthcheck = healthcheck @@ -307,23 +309,14 @@ class ClusterStateController: self.suspended_services = {} @reporter.step("Drop traffic to {node}, nodes - {block_nodes}") - def drop_traffic( - self, - node: ClusterNode, - wakeup_timeout: int, - name_interface: str, - block_nodes: list[ClusterNode] = None, - ) -> None: + def drop_traffic(self, node: ClusterNode, wakeup_timeout: int, name_interface: str, block_nodes: list[ClusterNode] = None) -> None: list_ip = self._parse_interfaces(block_nodes, name_interface) IpHelper.drop_input_traffic_to_node(node, list_ip) time.sleep(wakeup_timeout) self.dropped_traffic.append(node) @reporter.step("Start traffic to {node}") - def restore_traffic( - self, - node: 
ClusterNode, - ) -> None: + def restore_traffic(self, node: ClusterNode) -> None: IpHelper.restore_input_traffic_to_node(node=node) index = self.dropped_traffic.index(node) self.dropped_traffic.pop(index) @@ -410,9 +403,7 @@ class ClusterStateController: @reporter.step("Set MaintenanceModeAllowed - {status}") def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: frostfs_adm = FrostfsAdm( - shell=cluster_node.host.get_shell(), - frostfs_adm_exec_path=FROSTFS_ADM_EXEC, - config_file=FROSTFS_ADM_CONFIG_PATH, + shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH ) frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}") @@ -453,6 +444,25 @@ class ClusterStateController: else: assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'" + def remove_node_from_netmap(self, removes_nodes: list[StorageNode]) -> None: + alive_storage = list(set(self.cluster.storage_nodes) - set(removes_nodes))[0] + remove_nodes_from_map_morph(self.shell, self.cluster, removes_nodes, alive_storage) + self.excluded_from_netmap.extend(removes_nodes) + + def include_node_to_netmap(self, include_node: StorageNode, alive_node: StorageNode): + include_node_to_network_map(include_node, alive_node, self.shell, self.cluster) + self.excluded_from_netmap.pop(self.excluded_from_netmap.index(include_node)) + + def include_all_excluded_nodes(self): + if not self.excluded_from_netmap: + return + alive_node = list(set(self.cluster.storage_nodes) - set(self.excluded_from_netmap))[0] + if not alive_node: + return + + for exclude_node in self.excluded_from_netmap.copy(): + self.include_node_to_netmap(exclude_node, alive_node) + def _get_cli( self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode ) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]: @@ -469,11 +479,7 @@ class ClusterStateController: frostfs_adm = 
FrostfsAdm(shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) frostfs_cli = FrostfsCli(local_shell, FROSTFS_CLI_EXEC, local_wallet.config_path) - frostfs_cli_remote = FrostfsCli( - shell=shell, - frostfs_cli_exec_path=FROSTFS_CLI_EXEC, - config_file=wallet_config_path, - ) + frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path) return frostfs_adm, frostfs_cli, frostfs_cli_remote def _enable_date_synchronizer(self, cluster_node: ClusterNode): @@ -536,8 +542,5 @@ class ClusterStateController: @reporter.step("Get contract by domain - {domain_name}") def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str): - frostfs_adm = FrostfsAdm( - shell=cluster_node.host.get_shell(), - frostfs_adm_exec_path=FROSTFS_ADM_EXEC, - ) + frostfs_adm = FrostfsAdm(shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC) return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_http_endpoint(), domain_name).stdout diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index d192de5..55a8388 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -90,3 +90,6 @@ class Chunk: def __str__(self) -> str: return self.object_id + + def __repr__(self) -> str: + return self.object_id diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py index d1bba9f..7f3161c 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -8,6 +8,7 @@ from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher from 
frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo from frostfs_testlib.storage.grpc_operations import interfaces +from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils.cli_utils import parse_netmap_output @@ -42,6 +43,7 @@ class ChunksOperations(interfaces.ChunksInterface): if cluster_node.host_ip == node_info.node: return (cluster_node, node_info) + @wait_for_success(300, 5, fail_testcase=None) @reporter.step("Search shard with chunk {chunk}") def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: oid_path = f"{chunk.object_id[0]}/{chunk.object_id[1]}/{chunk.object_id[2]}/{chunk.object_id[3]}" @@ -63,7 +65,7 @@ class ChunksOperations(interfaces.ChunksInterface): address: Optional[str] = None, bearer: Optional[str] = None, generate_key: Optional[bool] = None, - trace: bool = False, + trace: bool = True, root: bool = False, verify_presence_all: bool = False, json: bool = True, @@ -86,7 +88,7 @@ class ChunksOperations(interfaces.ChunksInterface): xhdr=xhdr, timeout=timeout, ) - return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0] + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0]) @reporter.step("Get last parity chunk") def get_parity( @@ -97,7 +99,7 @@ class ChunksOperations(interfaces.ChunksInterface): bearer: Optional[str] = None, generate_key: Optional[bool] = None, oid: Optional[str] = None, - trace: bool = False, + trace: bool = True, root: bool = False, verify_presence_all: bool = False, json: bool = True, @@ -120,7 +122,7 @@ class ChunksOperations(interfaces.ChunksInterface): xhdr=xhdr, timeout=timeout, ) - return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0] + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[-1] @reporter.step("Get first data chunk") def get_first_data( diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py 
b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py index c8360ea..7a637d7 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py @@ -8,7 +8,7 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.plugins import load_plugin from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.s3.interfaces import BucketContainerResolver -from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.grpc_operations import interfaces from frostfs_testlib.utils import json_utils @@ -266,6 +266,7 @@ class ContainerOperations(interfaces.ContainerInterface): self, endpoint: str, cid: str, + cluster: Cluster, address: Optional[str] = None, ttl: Optional[int] = None, from_file: Optional[str] = None, diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py index 63a2922..0e14aec 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py @@ -509,6 +509,7 @@ class ObjectOperations(interfaces.ObjectInterface): cid: str, endpoint: str, bearer: str = "", + oid: Optional[str] = None, filters: Optional[dict] = None, expected_objects_list: Optional[list] = None, xhdr: Optional[dict] = None, @@ -516,6 +517,9 @@ class ObjectOperations(interfaces.ObjectInterface): phy: bool = False, root: bool = False, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + ttl: Optional[int] = None, ) -> list: """ SEARCH an Object. 
@@ -541,11 +545,15 @@ class ObjectOperations(interfaces.ObjectInterface): rpc_endpoint=endpoint, cid=cid, bearer=bearer, + oid=oid, xhdr=xhdr, filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None, session=session, phy=phy, root=root, + address=address, + generate_key=generate_key, + ttl=ttl, timeout=timeout, ) diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces.py b/src/frostfs_testlib/storage/grpc_operations/interfaces.py index 1947435..c293c2d 100644 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces.py +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces.py @@ -235,6 +235,7 @@ class ObjectInterface(ABC): cid: str, endpoint: str, bearer: str = "", + oid: Optional[str] = None, filters: Optional[dict] = None, expected_objects_list: Optional[list] = None, xhdr: Optional[dict] = None, @@ -242,6 +243,9 @@ class ObjectInterface(ABC): phy: bool = False, root: bool = False, timeout: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + ttl: Optional[int] = None, ) -> List: pass @@ -368,6 +372,7 @@ class ContainerInterface(ABC): self, endpoint: str, cid: str, + cluster: Cluster, address: Optional[str] = None, ttl: Optional[int] = None, from_file: Optional[str] = None, @@ -376,7 +381,7 @@ class ContainerInterface(ABC): xhdr: Optional[dict] = None, generate_key: Optional[bool] = None, timeout: Optional[str] = None, - ) -> List[str]: + ) -> List[ClusterNode]: """Show the nodes participating in the container in the current epoch.""" raise NotImplementedError("No implemethed method nodes") From cef64e315ee5e872f1f1ebc9eaefcd4b5bfefc9c Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Wed, 11 Sep 2024 19:39:25 +0300 Subject: [PATCH 10/81] [#267] add no rule found object and morph chain --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 126 ++++++++++++++++++ .../resources/error_patterns.py | 1 + 2 files changed, 127 insertions(+) diff 
--git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index d8fd61c..5b808ca 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -350,3 +350,129 @@ class FrostfsAdmMorph(CliCommand): if param not in ["self", "node_netmap_keys"] }, ) + + def add_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + rule: Optional[list[str]] = None, + path: Optional[str] = None, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + address: Address of wallet account + chain-id: Assign ID to the parsed chain + chain-id-hex: Flag to parse chain ID as hex + endpoint: Remote node control address (as 'multiaddr' or ':') + path: Path to encoded chain in JSON or binary format + rule: Rule statement + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. 
+ """ + return self._execute( + "control add-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def get_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + address string Address of wallet account + chain-id string Chain id + chain-id-hex Flag to parse chain ID as hex + endpoint string Remote node control address (as 'multiaddr' or ':') + target-name string Resource name in APE resource name format + target-type string Resource type(container/namespace) + timeout duration Timeout for an operation (default 15s) + wallet string Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "control get-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list_rules( + self, + target_type: str, + target_name: Optional[str] = None, + rpc_endpoint: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. 
+ """ + return self._execute( + "morph ape list-rule-chains", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def remove_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + all: Optional[bool] = None, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + address: Address of wallet account + all: Remove all chains + chain-id: Assign ID to the parsed chain + chain-id-hex: Flag to parse chain ID as hex + endpoint: Remote node control address (as 'multiaddr' or ':') + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "control remove-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) \ No newline at end of file diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 3b9231e..3ba5f13 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -29,3 +29,4 @@ S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied" RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied" NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound" +NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound" From 24b8ca73d74fbf7a52c733e72dc1e4127f55ceac Mon Sep 17 00:00:00 
2001 From: Ekaterina Chernitsyna Date: Wed, 11 Sep 2024 22:00:21 +0300 Subject: [PATCH 11/81] [#291] get namespace endpoint --- src/frostfs_testlib/storage/dataclasses/frostfs_services.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 1420356..4f5c348 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -39,6 +39,9 @@ class S3Gate(NodeBase): def get_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0) + def get_ns_endpoint(self, ns_name: str) -> str: + return self._get_attribute(f"{ConfigAttributes.ENDPOINT_DATA_0}_namespace").format(namespace=ns_name) + def get_all_endpoints(self) -> list[str]: return [ self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0), From 2976e30b75d25ad00d62529e0a68beda490ce795 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Mon, 7 Oct 2024 15:59:00 +0300 Subject: [PATCH 12/81] [#299] Add fuse to prevent similar names generation Signed-off-by: a.berezin --- src/frostfs_testlib/utils/string_utils.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/utils/string_utils.py b/src/frostfs_testlib/utils/string_utils.py index 80efa65..726c792 100644 --- a/src/frostfs_testlib/utils/string_utils.py +++ b/src/frostfs_testlib/utils/string_utils.py @@ -1,3 +1,4 @@ +import itertools import random import re import string @@ -7,6 +8,8 @@ ONLY_ASCII_LETTERS = string.ascii_letters DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits NON_DIGITS_AND_LETTERS = string.punctuation +FUSE = itertools.cycle(range(5)) + def unique_name(prefix: str = "", postfix: str = ""): """ @@ -18,7 +21,7 @@ def unique_name(prefix: str = "", postfix: str = ""): Returns: unique name string """ - return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}{postfix}" + return 
f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}{next(FUSE)}{postfix}" def random_string(length: int = 5, source: str = ONLY_ASCII_LETTERS): From a04eba8aecdbbc9285141c82328291eb0bf0e9b9 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Fri, 11 Oct 2024 12:23:32 +0300 Subject: [PATCH 13/81] [#302] Autoadd marks for frostfs Signed-off-by: a.berezin --- src/frostfs_testlib/__init__.py | 1 + src/frostfs_testlib/hooks.py | 12 ++++++++++++ src/frostfs_testlib/utils/string_utils.py | 1 + 3 files changed, 14 insertions(+) create mode 100644 src/frostfs_testlib/hooks.py diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index f3143e6..1ceb972 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1,3 +1,4 @@ __version__ = "2.0.1" from .fixtures import configure_testlib, hosting, temp_directory +from .hooks import pytest_collection_modifyitems diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py new file mode 100644 index 0000000..df89bff --- /dev/null +++ b/src/frostfs_testlib/hooks.py @@ -0,0 +1,12 @@ +import pytest + + +@pytest.hookimpl +def pytest_collection_modifyitems(items: list[pytest.Item]): + # All tests which reside in frostfs nodeid are granted with frostfs marker, excluding + # nodeid = full path of the test + # 1. plugins + # 2. 
testlib itself + for item in items: + if "frostfs" in item.nodeid and "plugin" not in item.nodeid and "testlib" not in item.nodeid: + item.add_marker("frostfs") diff --git a/src/frostfs_testlib/utils/string_utils.py b/src/frostfs_testlib/utils/string_utils.py index 726c792..acbca92 100644 --- a/src/frostfs_testlib/utils/string_utils.py +++ b/src/frostfs_testlib/utils/string_utils.py @@ -8,6 +8,7 @@ ONLY_ASCII_LETTERS = string.ascii_letters DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits NON_DIGITS_AND_LETTERS = string.punctuation +# if unique_name is called multiple times within the same microsecond, append 0-4 to the name so it surely unique FUSE = itertools.cycle(range(5)) From 2a41f2b0f64316efd83889b88b19ad7d966cb948 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Mon, 23 Sep 2024 17:54:40 +0300 Subject: [PATCH 14/81] [#301] Added interfaces for put/get lifecycle configuration to s3 clients --- pyproject.toml | 4 +-- requirements.txt | 4 +-- src/frostfs_testlib/cli/frostfs_adm/morph.py | 12 ++------ src/frostfs_testlib/s3/aws_cli_client.py | 30 +++++++++++++++++++ src/frostfs_testlib/s3/boto3_client.py | 21 +++++++++++++ src/frostfs_testlib/s3/interfaces.py | 12 ++++++++ src/frostfs_testlib/steps/epoch.py | 11 +++++-- .../testing/cluster_test_base.py | 8 ++--- 8 files changed, 80 insertions(+), 22 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 296ce65..3faa637 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -27,8 +27,8 @@ dependencies = [ "testrail-api>=1.12.0", "pytest==7.1.2", "tenacity==8.0.1", - "boto3==1.16.33", - "boto3-stubs[essential]==1.16.33", + "boto3==1.35.30", + "boto3-stubs[essential]==1.35.30", ] requires-python = ">=3.10" diff --git a/requirements.txt b/requirements.txt index 32e604f..e012366 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,8 +8,8 @@ docstring_parser==0.15 testrail-api==1.12.0 tenacity==8.0.1 pytest==7.1.2 -boto3==1.16.33 -boto3-stubs[essential]==1.16.33 +boto3==1.35.30 
+boto3-stubs[essential]==1.35.30 # Dev dependencies black==22.8.0 diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index 5b808ca..eea0985 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -69,9 +69,7 @@ class FrostfsAdmMorph(CliCommand): **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) - def set_config( - self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None - ) -> CommandResult: + def set_config(self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult: """Add/update global config value in the FrostFS network. Args: @@ -125,7 +123,7 @@ class FrostfsAdmMorph(CliCommand): ) def force_new_epoch( - self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None + self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None ) -> CommandResult: """Create new FrostFS epoch event in the side chain. 
@@ -344,11 +342,7 @@ class FrostfsAdmMorph(CliCommand): return self._execute( f"morph remove-nodes {' '.join(node_netmap_keys)}", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self", "node_netmap_keys"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, ) def add_rule( diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 8169afe..2482376 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -754,6 +754,36 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("ObjectLockConfiguration") + @reporter.step("Put bucket lifecycle configuration") + def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + cmd = ( + f"aws {self.common_flags} s3api put-bucket-lifecycle-configuration --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint} --lifecycle-configuration file://{dumped_configuration} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + + @reporter.step("Get bucket lifecycle configuration") + def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + cmd = ( + f"aws {self.common_flags} s3api get-bucket-lifecycle-configuration --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + + @reporter.step("Delete bucket lifecycle configuration") + def delete_bucket_lifecycle(self, bucket: str) -> dict: + cmd = ( + f"aws {self.common_flags} s3api delete-bucket-lifecycle --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = 
self._to_json(output) + return response + @staticmethod def _to_json(output: str) -> dict: json_output = {} diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index a644a6f..b638939 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -296,6 +296,27 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.delete_bucket_cors(Bucket=bucket) log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_cors result", response, {"Bucket": bucket}) + @reporter.step("Put bucket lifecycle configuration") + @report_error + def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + response = self.boto3_client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle_configuration) + log_command_execution(self.s3gate_endpoint, "S3 put_bucket_lifecycle_configuration result", response, {"Bucket": bucket}) + return response + + @reporter.step("Get bucket lifecycle configuration") + @report_error + def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + response = self.boto3_client.get_bucket_lifecycle_configuration(Bucket=bucket) + log_command_execution(self.s3gate_endpoint, "S3 get_bucket_lifecycle_configuration result", response, {"Bucket": bucket}) + return {"Rules": response.get("Rules")} + + @reporter.step("Delete bucket lifecycle configuration") + @report_error + def delete_bucket_lifecycle(self, bucket: str) -> dict: + response = self.boto3_client.delete_bucket_lifecycle(Bucket=bucket) + log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_lifecycle result", response, {"Bucket": bucket}) + return response + # END OF BUCKET METHODS # # OBJECT METHODS # diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index b1825d5..da4fc6b 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py 
@@ -366,6 +366,18 @@ class S3ClientWrapper(HumanReadableABC): def delete_object_tagging(self, bucket: str, key: str) -> None: """Removes the entire tag set from the specified object.""" + @abstractmethod + def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + """Adds or updates bucket lifecycle configuration""" + + @abstractmethod + def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + """Gets bucket lifecycle configuration""" + + @abstractmethod + def delete_bucket_lifecycle(self, bucket: str) -> dict: + """Deletes bucket lifecycle""" + @abstractmethod def get_object_attributes( self, diff --git a/src/frostfs_testlib/steps/epoch.py b/src/frostfs_testlib/steps/epoch.py index ce7ed12..6ec5483 100644 --- a/src/frostfs_testlib/steps/epoch.py +++ b/src/frostfs_testlib/steps/epoch.py @@ -69,7 +69,7 @@ def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] @reporter.step("Tick Epoch") -def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): +def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None, delta: Optional[int] = None): """ Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv) Args: @@ -88,12 +88,17 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH, ) - frostfs_adm.morph.force_new_epoch() + frostfs_adm.morph.force_new_epoch(delta=delta) return # Otherwise we tick epoch using transaction cur_epoch = get_epoch(shell, cluster) + if delta: + next_epoch = cur_epoch + delta + else: + next_epoch = cur_epoch + 1 + # Use first node by default ir_node = cluster.services(InnerRing)[0] # In case if no local_wallet_path is provided, we use wallet_path @@ -110,7 +115,7 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] 
wallet_password=ir_wallet_pass, scripthash=get_contract_hash(morph_chain, "netmap.frostfs", shell=shell), method="newEpoch", - arguments=f"int:{cur_epoch + 1}", + arguments=f"int:{next_epoch}", multisig_hash=f"{ir_address}:Global", address=ir_address, rpc_endpoint=morph_endpoint, diff --git a/src/frostfs_testlib/testing/cluster_test_base.py b/src/frostfs_testlib/testing/cluster_test_base.py index f2e10ad..50c8eb6 100644 --- a/src/frostfs_testlib/testing/cluster_test_base.py +++ b/src/frostfs_testlib/testing/cluster_test_base.py @@ -25,12 +25,8 @@ class ClusterTestBase: for _ in range(epochs_to_tick): self.tick_epoch(alive_node, wait_block) - def tick_epoch( - self, - alive_node: Optional[StorageNode] = None, - wait_block: int = None, - ): - epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node) + def tick_epoch(self, alive_node: Optional[StorageNode] = None, wait_block: int = None, delta: Optional[int] = None): + epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node, delta=delta) if wait_block: self.wait_for_blocks(wait_block) From cf48f474ebb8aea4798e007c931ca157eb8fd7ea Mon Sep 17 00:00:00 2001 From: "m.malygina" Date: Thu, 10 Oct 2024 10:39:54 +0300 Subject: [PATCH 15/81] [#303] add check if registry is on hdd Signed-off-by: m.malygina --- src/frostfs_testlib/load/interfaces/scenario_runner.py | 5 +++++ src/frostfs_testlib/load/runners.py | 6 ++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/interfaces/scenario_runner.py b/src/frostfs_testlib/load/interfaces/scenario_runner.py index 45c1317..c0062a9 100644 --- a/src/frostfs_testlib/load/interfaces/scenario_runner.py +++ b/src/frostfs_testlib/load/interfaces/scenario_runner.py @@ -1,5 +1,6 @@ from abc import ABC, abstractmethod +from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.load.k6 import K6 from frostfs_testlib.load.load_config import LoadParams from frostfs_testlib.storage.cluster import ClusterNode @@ -48,3 +49,7 
@@ class ScenarioRunner(ABC): @abstractmethod def get_results(self) -> dict: """Get results from K6 run""" + + @abstractmethod + def get_loaders(self) -> list[Loader]: + """Return loaders""" diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index a34786f..1ceac09 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -30,6 +30,7 @@ from frostfs_testlib.utils.file_keeper import FileKeeper class RunnerBase(ScenarioRunner): k6_instances: list[K6] + loaders: list[Loader] @reporter.step("Run preset on loaders") def preset(self): @@ -49,9 +50,11 @@ class RunnerBase(ScenarioRunner): def get_k6_instances(self): return self.k6_instances + def get_loaders(self) -> list[Loader]: + return self.loaders + class DefaultRunner(RunnerBase): - loaders: list[Loader] user: User def __init__( @@ -228,7 +231,6 @@ class DefaultRunner(RunnerBase): class LocalRunner(RunnerBase): - loaders: list[Loader] cluster_state_controller: ClusterStateController file_keeper: FileKeeper user: User From 738cfacbb7416d792c95e034bb8355acd7b1c7dd Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Mon, 7 Oct 2024 17:33:45 +0300 Subject: [PATCH 16/81] [#300] Refactor tests: use `unique_name` instead `hex + timestamp` Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/credentials/authmate_s3_provider.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/credentials/authmate_s3_provider.py b/src/frostfs_testlib/credentials/authmate_s3_provider.py index 66c5015..ed6454b 100644 --- a/src/frostfs_testlib/credentials/authmate_s3_provider.py +++ b/src/frostfs_testlib/credentials/authmate_s3_provider.py @@ -1,5 +1,4 @@ import re -from datetime import datetime from typing import Optional from frostfs_testlib import reporter @@ -10,6 +9,7 @@ from frostfs_testlib.shell import LocalShell from frostfs_testlib.steps.cli.container import list_containers from frostfs_testlib.storage.cluster import 
ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate +from frostfs_testlib.utils import string_utils class AuthmateS3CredentialsProvider(S3CredentialsProvider): @@ -22,7 +22,7 @@ class AuthmateS3CredentialsProvider(S3CredentialsProvider): gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes] # unique short bucket name - bucket = f"bucket-{hex(int(datetime.now().timestamp()*1000000))}" + bucket = string_utils.unique_name("bucket-") frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC) issue_secret_output = frostfs_authmate.secret.issue( From 5fa58a55c05f006b81954bc571e7a9e1cca1ffed Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 18 Oct 2024 13:25:12 +0300 Subject: [PATCH 17/81] [#304] Improve logging Boto3 IAM methods Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/s3/boto3_client.py | 174 +++++++++++++++++++------ 1 file changed, 135 insertions(+), 39 deletions(-) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index b638939..a99b866 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -68,6 +68,7 @@ class Boto3ClientWrapper(S3ClientWrapper): self.access_key_id: str = access_key_id self.secret_access_key: str = secret_access_key self.s3gate_endpoint: str = "" + self.iam_endpoint: str = "" self.boto3_iam_client: S3Client = None self.set_endpoint(s3gate_endpoint) @@ -90,11 +91,16 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Set endpoint IAM to {iam_endpoint}") def set_iam_endpoint(self, iam_endpoint: str): + if self.iam_endpoint == iam_endpoint: + return + + self.iam_endpoint = iam_endpoint + self.boto3_iam_client = self.session.client( service_name="iam", aws_access_key_id=self.access_key_id, aws_secret_access_key=self.secret_access_key, - endpoint_url=iam_endpoint, + endpoint_url=self.iam_endpoint, verify=False, ) @@ -687,25 +693,36 @@ 
class Boto3ClientWrapper(S3ClientWrapper): # Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.) @reporter.step("Adds the specified user to the specified group") + @report_error def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: - response = self.boto3_iam_client.add_user_to_group(UserName=user_name, GroupName=group_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.add_user_to_group(**params) + log_command_execution(self.iam_endpoint, "IAM Add User to Group", response, params) return response @reporter.step("Attaches the specified managed policy to the specified IAM group") + @report_error def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: - response = self.boto3_iam_client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.attach_group_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Attach Group Policy", response, params) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Attaches the specified managed policy to the specified user") + @report_error def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: - response = self.boto3_iam_client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.attach_user_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Attach User Policy", response, params) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") + @report_error def iam_create_access_key(self, user_name: str) -> dict: response = self.boto3_iam_client.create_access_key(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM Create Access Key", response, {"UserName": user_name}) 
access_key_id = response["AccessKey"].get("AccessKeyId") secret_access_key = response["AccessKey"].get("SecretAccessKey") @@ -715,138 +732,190 @@ class Boto3ClientWrapper(S3ClientWrapper): return access_key_id, secret_access_key @reporter.step("Creates a new group") + @report_error def iam_create_group(self, group_name: str) -> dict: response = self.boto3_iam_client.create_group(GroupName=group_name) + log_command_execution(self.iam_endpoint, "IAM Create Group", response, {"GroupName": group_name}) + assert response.get("Group"), f"Expected Group in response:\n{response}" assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" return response @reporter.step("Creates a new managed policy for your AWS account") + @report_error def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: - response = self.boto3_iam_client.create_policy(PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)) + params = self._convert_to_s3_params(locals().items()) + params["PolicyDocument"] = json.dumps(policy_document) + response = self.boto3_iam_client.create_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Create Policy", response, params) + assert response.get("Policy"), f"Expected Policy in response:\n{response}" assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" return response @reporter.step("Creates a new IAM user for your AWS account") + @report_error def iam_create_user(self, user_name: str) -> dict: response = self.boto3_iam_client.create_user(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM Create User", response, {"UserName": user_name}) + assert response.get("User"), f"Expected User in response:\n{response}" assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" return response @reporter.step("Deletes the access key pair associated with the specified IAM user") + 
@report_error def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: - response = self.boto3_iam_client.delete_access_key(AccessKeyId=access_key_id, UserName=user_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.delete_access_key(**params) + log_command_execution(self.iam_endpoint, "IAM Delete Access Key", response, params) return response @reporter.step("Deletes the specified IAM group") + @report_error def iam_delete_group(self, group_name: str) -> dict: response = self.boto3_iam_client.delete_group(GroupName=group_name) + log_command_execution(self.iam_endpoint, "IAM Delete Group", response, {"GroupName": group_name}) return response @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") + @report_error def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: - response = self.boto3_iam_client.delete_group_policy(GroupName=group_name, PolicyName=policy_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.delete_group_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Delete Group Policy", response, params) return response @reporter.step("Deletes the specified managed policy") + @report_error def iam_delete_policy(self, policy_arn: str) -> dict: response = self.boto3_iam_client.delete_policy(PolicyArn=policy_arn) + log_command_execution(self.iam_endpoint, "IAM Delete Policy", response, {"PolicyArn": policy_arn}) return response @reporter.step("Deletes the specified IAM user") + @report_error def iam_delete_user(self, user_name: str) -> dict: response = self.boto3_iam_client.delete_user(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM Delete User", response, {"UserName": user_name}) return response @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") + @report_error def iam_delete_user_policy(self, user_name: str, 
policy_name: str) -> dict: - response = self.boto3_iam_client.delete_user_policy(UserName=user_name, PolicyName=policy_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.delete_user_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Delete User Policy", response, params) return response @reporter.step("Removes the specified managed policy from the specified IAM group") + @report_error def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: - response = self.boto3_iam_client.detach_group_policy(GroupName=group_name, PolicyArn=policy_arn) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.detach_group_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Detach Group Policy", response, params) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Removes the specified managed policy from the specified user") + @report_error def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: - response = self.boto3_iam_client.detach_user_policy(UserName=user_name, PolicyArn=policy_arn) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.detach_user_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Detach User Policy", response, params) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Returns a list of IAM users that are in the specified IAM group") + @report_error def iam_get_group(self, group_name: str) -> dict: response = self.boto3_iam_client.get_group(GroupName=group_name) + log_command_execution(self.iam_endpoint, "IAM Get Group", response, {"GroupName": group_name}) assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}" - return response @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") + @report_error def iam_get_group_policy(self, group_name: 
str, policy_name: str) -> dict: - response = self.boto3_iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name) - + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.get_group_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Get Group Policy", response, params) return response @reporter.step("Retrieves information about the specified managed policy") + @report_error def iam_get_policy(self, policy_arn: str) -> dict: response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn) + log_command_execution(self.iam_endpoint, "IAM Get Policy", response, {"PolicyArn": policy_arn}) + assert response.get("Policy"), f"Expected Policy in response:\n{response}" assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" return response @reporter.step("Retrieves information about the specified version of the specified managed policy") + @report_error def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: - response = self.boto3_iam_client.get_policy_version(PolicyArn=policy_arn, VersionId=version_id) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.get_policy_version(**params) + log_command_execution(self.iam_endpoint, "IAM Get Policy Version", response, params) + assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}" assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}" return response @reporter.step("Retrieves information about the specified IAM user") + @report_error def iam_get_user(self, user_name: str) -> dict: response = self.boto3_iam_client.get_user(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM Get User", response, {"UserName": user_name}) + assert response.get("User"), f"Expected User in response:\n{response}" assert response["User"].get("UserName") == user_name, f"UserName should be 
equal to {user_name}" return response @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") + @report_error def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: - response = self.boto3_iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.get_user_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Get User Policy", response, params) assert response.get("UserName"), f"Expected UserName in response:\n{response}" - return response @reporter.step("Returns information about the access key IDs associated with the specified IAM user") + @report_error def iam_list_access_keys(self, user_name: str) -> dict: response = self.boto3_iam_client.list_access_keys(UserName=user_name) - + log_command_execution(self.iam_endpoint, "IAM List Access Keys", response, {"UserName": user_name}) return response @reporter.step("Lists all managed policies that are attached to the specified IAM group") + @report_error def iam_list_attached_group_policies(self, group_name: str) -> dict: response = self.boto3_iam_client.list_attached_group_policies(GroupName=group_name) + log_command_execution(self.iam_endpoint, "IAM List Attached Group Policies", response, {"GroupName": group_name}) assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" - return response @reporter.step("Lists all managed policies that are attached to the specified IAM user") + @report_error def iam_list_attached_user_policies(self, user_name: str) -> dict: response = self.boto3_iam_client.list_attached_user_policies(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM List Attached User Policies", response, {"UserName": user_name}) assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" - return response @reporter.step("Lists all IAM users, 
groups, and roles that the specified managed policy is attached to") + @report_error def iam_list_entities_for_policy(self, policy_arn: str) -> dict: response = self.boto3_iam_client.list_entities_for_policy(PolicyArn=policy_arn) + log_command_execution(self.iam_endpoint, "IAM List Entities For Policy", response, {"PolicyArn": policy_arn}) assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" @@ -854,98 +923,125 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") + @report_error def iam_list_group_policies(self, group_name: str) -> dict: response = self.boto3_iam_client.list_group_policies(GroupName=group_name) + log_command_execution(self.iam_endpoint, "IAM List Group Policies", response, {"GroupName": group_name}) assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" - return response @reporter.step("Lists the IAM groups") + @report_error def iam_list_groups(self) -> dict: response = self.boto3_iam_client.list_groups() + log_command_execution(self.iam_endpoint, "IAM List Groups", response) assert response.get("Groups"), f"Expected Groups in response:\n{response}" - return response @reporter.step("Lists the IAM groups that the specified IAM user belongs to") + @report_error def iam_list_groups_for_user(self, user_name: str) -> dict: response = self.boto3_iam_client.list_groups_for_user(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM List Groups For User", response, {"UserName": user_name}) assert response.get("Groups"), f"Expected Groups in response:\n{response}" - return response @reporter.step("Lists all the managed policies that are available in your AWS account") + @report_error def iam_list_policies(self) -> dict: response = self.boto3_iam_client.list_policies() + 
log_command_execution(self.iam_endpoint, "IAM List Policies", response) assert response.get("Policies"), f"Expected Policies in response:\n{response}" - return response @reporter.step("Lists information about the versions of the specified managed policy") + @report_error def iam_list_policy_versions(self, policy_arn: str) -> dict: response = self.boto3_iam_client.list_policy_versions(PolicyArn=policy_arn) + log_command_execution(self.iam_endpoint, "IAM List Policy Versions", response, {"PolicyArn": policy_arn}) assert response.get("Versions"), f"Expected Versions in response:\n{response}" - return response @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") + @report_error def iam_list_user_policies(self, user_name: str) -> dict: response = self.boto3_iam_client.list_user_policies(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM List User Policies", response, {"UserName": user_name}) assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" - return response @reporter.step("Lists the IAM users") + @report_error def iam_list_users(self) -> dict: response = self.boto3_iam_client.list_users() + log_command_execution(self.iam_endpoint, "IAM List Users", response) assert response.get("Users"), f"Expected Users in response:\n{response}" - return response @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") + @report_error def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: - response = self.boto3_iam_client.put_group_policy( - GroupName=group_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document) - ) + params = self._convert_to_s3_params(locals().items()) + params["PolicyDocument"] = json.dumps(policy_document) + response = self.boto3_iam_client.put_group_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Put Group Policy", response, params) 
sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") + @report_error def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: - response = self.boto3_iam_client.put_user_policy( - UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document) - ) + params = self._convert_to_s3_params(locals().items()) + params["PolicyDocument"] = json.dumps(policy_document) + response = self.boto3_iam_client.put_user_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Put User Policy", response, params) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Removes the specified user from the specified group") + @report_error def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: - response = self.boto3_iam_client.remove_user_from_group(GroupName=group_name, UserName=user_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.remove_user_from_group(**params) + log_command_execution(self.iam_endpoint, "IAM Remove User From Group", response, params) return response @reporter.step("Updates the name and/or the path of the specified IAM group") + @report_error def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict: - response = self.boto3_iam_client.update_group(GroupName=group_name, NewGroupName=new_name, NewPath="/") - + params = {"GroupName": group_name, "NewGroupName": new_name, "NewPath": "/"} + response = self.boto3_iam_client.update_group(**params) + log_command_execution(self.iam_endpoint, "IAM Update Group", response, params) return response @reporter.step("Updates the name and/or the path of the specified IAM user") + @report_error def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict: - response = self.boto3_iam_client.update_user(UserName=user_name, 
NewUserName=new_name, NewPath="/") + params = {"UserName": user_name, "NewUserName": new_name, "NewPath": "/"} + response = self.boto3_iam_client.update_user(**params) + log_command_execution(self.iam_endpoint, "IAM Update User", response, params) return response @reporter.step("Adds one or more tags to an IAM user") + @report_error def iam_tag_user(self, user_name: str, tags: list) -> dict: - tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - response = self.boto3_iam_client.tag_user(UserName=user_name, Tags=tags_json) + params = self._convert_to_s3_params(locals().items()) + params["Tags"] = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + response = self.boto3_iam_client.tag_user(**params) + log_command_execution(self.iam_endpoint, "IAM Tag User", response, params) return response @reporter.step("List tags of IAM user") + @report_error def iam_list_user_tags(self, user_name: str) -> dict: response = self.boto3_iam_client.list_user_tags(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM List User Tags", response, {"UserName": user_name}) return response @reporter.step("Removes the specified tags from the user") + @report_error def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: - response = self.boto3_iam_client.untag_user(UserName=user_name, TagKeys=tag_keys) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.untag_user(**params) + log_command_execution(self.iam_endpoint, "IAM Untag User", response, params) return response From 3f3be83d90cb3226268f00746e67f433b63c90be Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Mon, 21 Oct 2024 09:01:37 +0300 Subject: [PATCH 18/81] [#305] Added IAM abstract method --- src/frostfs_testlib/s3/interfaces.py | 4 ++++ src/frostfs_testlib/steps/metrics.py | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py 
index da4fc6b..c084484 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -58,6 +58,10 @@ class S3ClientWrapper(HumanReadableABC): def set_endpoint(self, s3gate_endpoint: str): """Set endpoint""" + @abstractmethod + def set_iam_endpoint(self, iam_endpoint: str): + """Set iam endpoint""" + @abstractmethod def create_bucket( self, diff --git a/src/frostfs_testlib/steps/metrics.py b/src/frostfs_testlib/steps/metrics.py index 29e49d4..a9e545a 100644 --- a/src/frostfs_testlib/steps/metrics.py +++ b/src/frostfs_testlib/steps/metrics.py @@ -1,8 +1,8 @@ import re from frostfs_testlib import reporter -from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.testing.test_control import wait_for_success @reporter.step("Check metrics result") @@ -19,7 +19,7 @@ def check_metrics_counter( counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps) assert eval( f"{counter_act} {operator} {counter_exp}" - ), f"Expected: {counter_exp} {operator} Actual: {counter_act} in node: {cluster_node}" + ), f"Expected: {counter_exp} {operator} Actual: {counter_act} in nodes: {cluster_nodes}" @reporter.step("Get metrics value from node: {node}") From b2bf6677f184fdb2d92045d753722fd651091e46 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Fri, 25 Oct 2024 18:52:43 +0300 Subject: [PATCH 19/81] [#310] Update test marking Signed-off-by: a.berezin --- src/frostfs_testlib/hooks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py index df89bff..6830e78 100644 --- a/src/frostfs_testlib/hooks.py +++ b/src/frostfs_testlib/hooks.py @@ -8,5 +8,6 @@ def pytest_collection_modifyitems(items: list[pytest.Item]): # 1. plugins # 2. 
testlib itself for item in items: - if "frostfs" in item.nodeid and "plugin" not in item.nodeid and "testlib" not in item.nodeid: + location = item.location[0] + if "frostfs" in location and "plugin" not in location and "testlib" not in location: item.add_marker("frostfs") From e6faddedeb008950583174659eb52374bd475e5d Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Mon, 21 Oct 2024 23:47:47 +0300 Subject: [PATCH 20/81] [#297] add morph rule chain --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 28 ++++++------------- .../storage/dataclasses/ape.py | 15 ++++++++++ 2 files changed, 23 insertions(+), 20 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index eea0985..7228692 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -122,9 +122,7 @@ class FrostfsAdmMorph(CliCommand): **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) - def force_new_epoch( - self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None - ) -> CommandResult: + def force_new_epoch(self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult: """Create new FrostFS epoch event in the side chain. 
Args: @@ -343,11 +341,11 @@ class FrostfsAdmMorph(CliCommand): return self._execute( f"morph remove-nodes {' '.join(node_netmap_keys)}", **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, + **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, ) - + def add_rule( self, - endpoint: str, chain_id: str, target_name: str, target_type: str, @@ -361,10 +359,8 @@ class FrostfsAdmMorph(CliCommand): """Drop objects from the node's local storage Args: - address: Address of wallet account chain-id: Assign ID to the parsed chain chain-id-hex: Flag to parse chain ID as hex - endpoint: Remote node control address (as 'multiaddr' or ':') path: Path to encoded chain in JSON or binary format rule: Rule statement target-name: Resource name in APE resource name format @@ -376,13 +372,12 @@ class FrostfsAdmMorph(CliCommand): Command`s result. """ return self._execute( - "control add-rule", + "morph ape add-rule-chain", **{param: value for param, value in locals().items() if param not in ["self"]}, ) def get_rule( self, - endpoint: str, chain_id: str, target_name: str, target_type: str, @@ -394,10 +389,8 @@ class FrostfsAdmMorph(CliCommand): """Drop objects from the node's local storage Args: - address string Address of wallet account chain-id string Chain id chain-id-hex Flag to parse chain ID as hex - endpoint string Remote node control address (as 'multiaddr' or ':') target-name string Resource name in APE resource name format target-type string Resource type(container/namespace) timeout duration Timeout for an operation (default 15s) @@ -407,7 +400,7 @@ class FrostfsAdmMorph(CliCommand): Command`s result. 
""" return self._execute( - "control get-rule", + "morph ape get-rule-chain", **{param: value for param, value in locals().items() if param not in ["self"]}, ) @@ -423,8 +416,6 @@ class FrostfsAdmMorph(CliCommand): """Drop objects from the node's local storage Args: - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') target-name: Resource name in APE resource name format target-type: Resource type(container/namespace) timeout: Timeout for an operation (default 15s) @@ -437,10 +428,9 @@ class FrostfsAdmMorph(CliCommand): "morph ape list-rule-chains", **{param: value for param, value in locals().items() if param not in ["self"]}, ) - + def remove_rule( self, - endpoint: str, chain_id: str, target_name: str, target_type: str, @@ -453,11 +443,9 @@ class FrostfsAdmMorph(CliCommand): """Drop objects from the node's local storage Args: - address: Address of wallet account all: Remove all chains chain-id: Assign ID to the parsed chain chain-id-hex: Flag to parse chain ID as hex - endpoint: Remote node control address (as 'multiaddr' or ':') target-name: Resource name in APE resource name format target-type: Resource type(container/namespace) timeout: Timeout for an operation (default 15s) @@ -467,6 +455,6 @@ class FrostfsAdmMorph(CliCommand): Command`s result. 
""" return self._execute( - "control remove-rule", + "morph ape rm-rule-chain", **{param: value for param, value in locals().items() if param not in ["self"]}, - ) \ No newline at end of file + ) diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py index b6563f4..f0f1758 100644 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -26,6 +26,21 @@ class ObjectOperations(HumanReadableEnum): return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] +@dataclass +class Operations: + GET_CONTAINER = "GetContainer" + PUT_CONTAINER = "PutContainer" + DELETE_CONTAINER = "DeleteContainer" + LIST_CONTAINER = "ListContainers" + GET_OBJECT = "GetObject" + DELETE_OBJECT = "DeleteObject" + HASH_OBJECT = "HashObject" + RANGE_OBJECT = "RangeObject" + SEARCH_OBJECT = "SearchObject" + HEAD_OBJECT = "HeadObject" + PUT_OBJECT = "PutObject" + + class Verb(HumanReadableEnum): ALLOW = "allow" DENY = "deny" From 3d6a356e20b5ce13350b1507d7d45e74749b37d7 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 18 Oct 2024 15:57:40 +0300 Subject: [PATCH 21/81] [#306] Fix handling of bucket names in AWS CLI - Add quotes around container names if they contain spaces or `-`. 
Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/s3/aws_cli_client.py | 154 +++++++++++++++++++++++ 1 file changed, 154 insertions(+) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 2482376..ff4e329 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -70,6 +70,9 @@ class AwsCliClient(S3ClientWrapper): if bucket is None: bucket = string_utils.unique_name("bucket-") + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + if object_lock_enabled_for_bucket is None: object_lock = "" elif object_lock_enabled_for_bucket: @@ -103,16 +106,25 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete bucket S3") def delete_bucket(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd, command_options) @reporter.step("Head bucket S3") def head_bucket(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd) @reporter.step("Put bucket versioning status") def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} " f"--versioning-configuration Status={status.value} " @@ -122,6 +134,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket versioning status") def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api 
get-bucket-versioning --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -132,6 +147,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put bucket tagging") def put_bucket_tagging(self, bucket: str, tags: list) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]} cmd = ( f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} " @@ -141,6 +159,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket tagging") def get_bucket_tagging(self, bucket: str) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -151,6 +172,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket acl") def get_bucket_acl(self, bucket: str) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) @@ -160,6 +184,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket location") def get_bucket_location(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -170,6 +197,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List objects S3") def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" output = 
self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -181,6 +211,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List objects S3 v2") def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -195,6 +228,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List objects versions S3") def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -205,6 +241,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List objects delete markers S3") def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -228,8 +267,13 @@ class AwsCliClient(S3ClientWrapper): ) -> str: if bucket is None: bucket = source_bucket + + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + if key is None: key = string_utils.unique_name("copy-object-") + copy_source = f"{source_bucket}/{source_key}" cmd = ( @@ -266,6 +310,9 @@ class AwsCliClient(S3ClientWrapper): grant_full_control: Optional[str] = None, grant_read: Optional[str] = None, ) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + if key is None: key = os.path.basename(filepath) @@ -297,6 +344,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Head object S3") def head_object(self, bucket: str, key: str, version_id: Optional[str] = 
None) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} " @@ -315,6 +365,9 @@ class AwsCliClient(S3ClientWrapper): object_range: Optional[tuple[int, int]] = None, full_output: bool = False, ) -> dict | TestFile: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-"))) version = f" --version-id {version_id}" if version_id else "" cmd = ( @@ -329,6 +382,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get object ACL") def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} " @@ -347,6 +403,9 @@ class AwsCliClient(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-object-acl --bucket {bucket} --key {key} " f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -369,6 +428,9 @@ class AwsCliClient(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -383,6 +445,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete objects S3") def delete_objects(self, bucket: str, keys: list[str]) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + file_path = 
os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") delete_structure = json.dumps(_make_objs_dict(keys)) with open(file_path, "w") as out_file: @@ -399,6 +464,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete object S3") def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api delete-object --bucket {bucket} " @@ -409,6 +477,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete object versions S3") def delete_object_versions(self, bucket: str, object_versions: list) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + # Build deletion list in S3 format delete_list = { "Objects": [ @@ -435,6 +506,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete object versions S3 without delete markers") def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + # Delete objects without creating delete markers for object_version in object_versions: self.delete_object(bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"]) @@ -450,6 +524,8 @@ class AwsCliClient(S3ClientWrapper): part_number: int = 0, full_output: bool = True, ) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' attrs = ",".join(attributes) version = f" --version-id {version_id}" if version_id else "" @@ -473,6 +549,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket policy") def get_bucket_policy(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -483,6 +562,9 @@ class 
AwsCliClient(S3ClientWrapper): @reporter.step("Delete bucket policy") def delete_bucket_policy(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api delete-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -493,6 +575,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put bucket policy") def put_bucket_policy(self, bucket: str, policy: dict) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + # Leaving it as is was in test repo. Double dumps to escape resulting string # Example: # policy = {"a": 1} @@ -508,6 +593,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket cors") def get_bucket_cors(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -518,6 +606,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put bucket cors") def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -526,6 +617,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete bucket cors") def delete_bucket_cors(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -534,6 +628,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete bucket tagging") def delete_bucket_tagging(self, bucket: str) -> None: + if bucket.startswith("-") or 
" " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -549,6 +646,9 @@ class AwsCliClient(S3ClientWrapper): version_id: Optional[str] = None, bypass_governance_retention: Optional[bool] = None, ) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api put-object-retention --bucket {bucket} --key {key} " @@ -566,6 +666,9 @@ class AwsCliClient(S3ClientWrapper): legal_hold_status: Literal["ON", "OFF"], version_id: Optional[str] = None, ) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" legal_hold = json.dumps({"Status": legal_hold_status}) cmd = ( @@ -576,6 +679,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put object tagging") def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} version = f" --version-id {version_id}" if version_id else "" @@ -587,6 +693,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get object tagging") def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key {key} " @@ -598,6 +707,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete object tagging") def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: + if bucket.startswith("-") or " " in 
bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " @@ -613,6 +725,9 @@ class AwsCliClient(S3ClientWrapper): acl: Optional[str] = None, metadata: Optional[dict] = None, ) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) @@ -633,6 +748,9 @@ class AwsCliClient(S3ClientWrapper): acl: Optional[str] = None, metadata: Optional[dict] = None, ) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --recursive --profile {self.profile}" @@ -648,6 +766,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Create multipart upload S3") def create_multipart_upload(self, bucket: str, key: str) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " f"--key {key} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -661,6 +782,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List multipart uploads S3") def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -671,6 +795,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Abort multipart upload S3") def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} " 
f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -679,6 +806,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Upload part S3") def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} " f"--upload-id {upload_id} --part-number {part_num} --body {filepath} " @@ -691,6 +821,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Upload copy part S3") def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} " f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} " @@ -704,6 +837,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List parts S3") def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -717,6 +853,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Complete multipart upload S3") def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + file_path = os.path.join(os.getcwd(), ASSETS_DIR, "parts.json") parts_dict = {"Parts": [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]} @@ -737,6 +876,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put object lock configuration") def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: + if 
bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -746,6 +888,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get object lock configuration") def get_object_lock_configuration(self, bucket: str): + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -756,6 +901,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put bucket lifecycle configuration") def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-bucket-lifecycle-configuration --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint} --lifecycle-configuration file://{dumped_configuration} --profile {self.profile}" @@ -766,6 +914,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket lifecycle configuration") def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-lifecycle-configuration --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -776,6 +927,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete bucket lifecycle configuration") def delete_bucket_lifecycle(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api delete-bucket-lifecycle --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" 
From 26139767f4118f1655c067ffa316e6ae9ebf6064 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Wed, 23 Oct 2024 14:08:54 +0300 Subject: [PATCH 22/81] [#311] Add AWS CLI command to report from Boto3 request Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/s3/boto3_client.py | 1000 +++++++++++++++--------- src/frostfs_testlib/utils/cli_utils.py | 72 +- 2 files changed, 672 insertions(+), 400 deletions(-) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index a99b866..91d8c5a 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -1,8 +1,8 @@ import json import logging import os +from collections.abc import Callable from datetime import datetime -from functools import wraps from time import sleep from typing import Literal, Optional, Union @@ -28,48 +28,32 @@ logger = logging.getLogger("NeoLogger") urllib3.disable_warnings() -def report_error(func): - @wraps(func) - def deco(*a, **kw): - try: - return func(*a, **kw) - except ClientError as err: - url = None - params = {"args": a, "kwargs": kw} - - if isinstance(a[0], Boto3ClientWrapper): - client: Boto3ClientWrapper = a[0] - url = client.s3gate_endpoint - params = {"args": a[1:], "kwargs": kw} - - log_command_execution(url, f"Failed {err.operation_name}", err.response, params) - raise - - return deco - - class Boto3ClientWrapper(S3ClientWrapper): __repr_name__: str = "Boto3 client" @reporter.step("Configure S3 client (boto3)") - @report_error def __init__( self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" ) -> None: self.boto3_client: S3Client = None - self.session = boto3.Session() + self.s3gate_endpoint: str = "" + + self.boto3_iam_client: S3Client = None + self.iam_endpoint: str = "" + + self.access_key_id: str = access_key_id + self.secret_access_key: str = secret_access_key + self.profile = profile self.region = region + + self.session = 
boto3.Session() self.config = Config( retries={ "max_attempts": MAX_REQUEST_ATTEMPTS, "mode": RETRY_MODE, } ) - self.access_key_id: str = access_key_id - self.secret_access_key: str = secret_access_key - self.s3gate_endpoint: str = "" - self.iam_endpoint: str = "" - self.boto3_iam_client: S3Client = None + self.set_endpoint(s3gate_endpoint) @reporter.step("Set endpoint S3 to {s3gate_endpoint}") @@ -116,13 +100,24 @@ class Boto3ClientWrapper(S3ClientWrapper): return result def _convert_to_s3_params(self, scope: dict, exclude: Optional[list[str]] = None) -> dict: - if not exclude: - exclude = ["self"] - return {self._to_s3_param(param): value for param, value in scope if param not in exclude and value is not None} + exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"] + return {self._to_s3_param(param): value for param, value in scope.items() if param not in exclude and value is not None} + + def _exec_request(self, method: Callable, params: Optional[dict] = None, **kwargs): + if not params: + params = {} + + try: + result = method(**params) + except ClientError as err: + log_command_execution(method.__name__, err.response, params, **kwargs) + raise + + log_command_execution(method.__name__, result, params, **kwargs) + return result # BUCKET METHODS # @reporter.step("Create bucket S3") - @report_error def create_bucket( self, bucket: Optional[str] = None, @@ -151,81 +146,98 @@ class Boto3ClientWrapper(S3ClientWrapper): if location_constraint: params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) - s3_bucket = self.boto3_client.create_bucket(**params) - log_command_execution(self.s3gate_endpoint, f"Created S3 bucket {bucket}", s3_bucket, params) + self._exec_request(self.boto3_client.create_bucket, params, endpoint=self.s3gate_endpoint, profile=self.profile) return bucket @reporter.step("List buckets S3") - @report_error def list_buckets(self) -> list[str]: - found_buckets = [] - - response = 
self.boto3_client.list_buckets() - log_command_execution(self.s3gate_endpoint, "S3 List buckets result", response) - - for bucket in response["Buckets"]: - found_buckets.append(bucket["Name"]) - - return found_buckets + response = self._exec_request( + self.boto3_client.list_buckets, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return [bucket["Name"] for bucket in response["Buckets"]] @reporter.step("Delete bucket S3") - @report_error def delete_bucket(self, bucket: str) -> None: - response = self.boto3_client.delete_bucket(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 Delete bucket result", response, {"Bucket": bucket}) + self._exec_request( + self.boto3_client.delete_bucket, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Head bucket S3") - @report_error def head_bucket(self, bucket: str) -> None: - response = self.boto3_client.head_bucket(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 Head bucket result", response, {"Bucket": bucket}) + self._exec_request( + self.boto3_client.head_bucket, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put bucket versioning status") - @report_error def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: params = {"Bucket": bucket, "VersioningConfiguration": {"Status": status.value}} - response = self.boto3_client.put_bucket_versioning(**params) - log_command_execution(self.s3gate_endpoint, "S3 Set bucket versioning to", response, params) + self._exec_request( + self.boto3_client.put_bucket_versioning, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get bucket versioning status") - @report_error def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: - response = self.boto3_client.get_bucket_versioning(Bucket=bucket) - status = response.get("Status") - 
log_command_execution(self.s3gate_endpoint, "S3 Got bucket versioning status", response, {"Bucket": bucket}) - return status + response = self._exec_request( + self.boto3_client.get_bucket_versioning, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("Status") @reporter.step("Put bucket tagging") - @report_error def put_bucket_tagging(self, bucket: str, tags: list) -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} - params = self._convert_to_s3_params(locals().items(), exclude=["self", "tags"]) - response = self.boto3_client.put_bucket_tagging(**params) - log_command_execution(self.s3gate_endpoint, "S3 Put bucket tagging", response, params) + params = self._convert_to_s3_params(locals(), exclude=["tags"]) + self._exec_request( + self.boto3_client.put_bucket_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get bucket tagging") - @report_error def get_bucket_tagging(self, bucket: str) -> list: - response = self.boto3_client.get_bucket_tagging(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 Get bucket tagging", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_tagging, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("TagSet") @reporter.step("Get bucket acl") - @report_error def get_bucket_acl(self, bucket: str) -> list: - response = self.boto3_client.get_bucket_acl(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 Get bucket acl", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_acl, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("Grants") @reporter.step("Delete bucket tagging") - @report_error def delete_bucket_tagging(self, bucket: str) -> None: - response 
= self.boto3_client.delete_bucket_tagging(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 Delete bucket tagging", response, {"Bucket": bucket}) + self._exec_request( + self.boto3_client.delete_bucket_tagging, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put bucket ACL") - @report_error def put_bucket_acl( self, bucket: str, @@ -233,141 +245,181 @@ class Boto3ClientWrapper(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> None: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.put_bucket_acl(**params) - log_command_execution(self.s3gate_endpoint, "S3 ACL bucket result", response, params) + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.put_bucket_acl, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put object lock configuration") - @report_error def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: params = {"Bucket": bucket, "ObjectLockConfiguration": configuration} - response = self.boto3_client.put_object_lock_configuration(**params) - log_command_execution(self.s3gate_endpoint, "S3 put_object_lock_configuration result", response, params) - return response + return self._exec_request( + self.boto3_client.put_object_lock_configuration, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get object lock configuration") - @report_error def get_object_lock_configuration(self, bucket: str) -> dict: - response = self.boto3_client.get_object_lock_configuration(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 get_object_lock_configuration result", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_object_lock_configuration, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return 
response.get("ObjectLockConfiguration") @reporter.step("Get bucket policy") - @report_error def get_bucket_policy(self, bucket: str) -> str: - response = self.boto3_client.get_bucket_policy(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 get_bucket_policy result", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_policy, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("Policy") @reporter.step("Delete bucket policy") - @report_error def delete_bucket_policy(self, bucket: str) -> str: - response = self.boto3_client.delete_bucket_policy(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_policy result", response, {"Bucket": bucket}) - return response + return self._exec_request( + self.boto3_client.delete_bucket_policy, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put bucket policy") - @report_error def put_bucket_policy(self, bucket: str, policy: dict) -> None: params = {"Bucket": bucket, "Policy": json.dumps(policy)} - response = self.boto3_client.put_bucket_policy(**params) - log_command_execution(self.s3gate_endpoint, "S3 put_bucket_policy result", response, params) - return response + return self._exec_request( + self.boto3_client.put_bucket_policy, + params, + # Overriding option for AWS CLI + policy=policy, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get bucket cors") - @report_error def get_bucket_cors(self, bucket: str) -> dict: - response = self.boto3_client.get_bucket_cors(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 get_bucket_cors result", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_cors, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("CORSRules") @reporter.step("Get bucket location") 
- @report_error def get_bucket_location(self, bucket: str) -> str: - response = self.boto3_client.get_bucket_location(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 get_bucket_location result", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_location, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("LocationConstraint") @reporter.step("Put bucket cors") - @report_error def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.put_bucket_cors(**params) - log_command_execution(self.s3gate_endpoint, "S3 put_bucket_cors result", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_client.put_bucket_cors, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Delete bucket cors") - @report_error def delete_bucket_cors(self, bucket: str) -> None: - response = self.boto3_client.delete_bucket_cors(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_cors result", response, {"Bucket": bucket}) + self._exec_request( + self.boto3_client.delete_bucket_cors, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put bucket lifecycle configuration") - @report_error def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: - response = self.boto3_client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle_configuration) - log_command_execution(self.s3gate_endpoint, "S3 put_bucket_lifecycle_configuration result", response, {"Bucket": bucket}) - return response + params = self._convert_to_s3_params(locals(), exclude=["dumped_configuration"]) + return self._exec_request( + 
self.boto3_client.put_bucket_lifecycle_configuration, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get bucket lifecycle configuration") - @report_error def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: - response = self.boto3_client.get_bucket_lifecycle_configuration(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 get_bucket_lifecycle_configuration result", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_lifecycle_configuration, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return {"Rules": response.get("Rules")} @reporter.step("Delete bucket lifecycle configuration") - @report_error def delete_bucket_lifecycle(self, bucket: str) -> dict: - response = self.boto3_client.delete_bucket_lifecycle(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_lifecycle result", response, {"Bucket": bucket}) - return response + return self._exec_request( + self.boto3_client.delete_bucket_lifecycle, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) # END OF BUCKET METHODS # # OBJECT METHODS # @reporter.step("List objects S3 v2") - @report_error def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.list_objects_v2(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 v2 List objects result", response, params) - + response = self._exec_request( + self.boto3_client.list_objects_v2, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) obj_list = [obj["Key"] for obj in response.get("Contents", [])] logger.info(f"Found s3 objects: {obj_list}") - return response if full_output else obj_list @reporter.step("List objects S3") - @report_error def list_objects(self, bucket: str, full_output: 
bool = False) -> Union[dict, list[str]]: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.list_objects(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 List objects result", response, params) - + response = self._exec_request( + self.boto3_client.list_objects, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) obj_list = [obj["Key"] for obj in response.get("Contents", [])] logger.info(f"Found s3 objects: {obj_list}") - return response if full_output else obj_list @reporter.step("List objects versions S3") - @report_error def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.list_object_versions(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 List objects versions result", response, params) + response = self._exec_request( + self.boto3_client.list_object_versions, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response if full_output else response.get("Versions", []) @reporter.step("List objects delete markers S3") - @report_error def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.list_object_versions(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 List objects delete markers result", response, params) + response = self._exec_request( + self.boto3_client.list_object_versions, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response if full_output else response.get("DeleteMarkers", []) @reporter.step("Put object S3") - @report_error def put_object( self, bucket: str, @@ -388,40 +440,53 @@ class Boto3ClientWrapper(S3ClientWrapper): with open(filepath, "rb") as put_file: body = put_file.read() - params = 
self._convert_to_s3_params(locals().items(), exclude=["self", "filepath", "put_file", "body"]) - response = self.boto3_client.put_object(Body=body, **params) - log_command_execution(self.s3gate_endpoint, "S3 Put object result", response, params) + params = self._convert_to_s3_params(locals(), exclude=["filepath", "put_file"]) + response = self._exec_request( + self.boto3_client.put_object, + params, + body=filepath, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("VersionId") @reporter.step("Head object S3") - @report_error def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.head_object(**params) - log_command_execution(self.s3gate_endpoint, "S3 Head object result", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_client.head_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Delete object S3") - @report_error def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.delete_object(**params) - log_command_execution(self.s3gate_endpoint, "S3 Delete object result", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_client.delete_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Delete objects S3") - @report_error def delete_objects(self, bucket: str, keys: list[str]) -> dict: params = {"Bucket": bucket, "Delete": _make_objs_dict(keys)} - response = self.boto3_client.delete_objects(**params) - log_command_execution(self.s3gate_endpoint, "S3 Delete objects result", response, params) + response = self._exec_request( + self.boto3_client.delete_objects, + params, + 
endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + assert ( "Errors" not in response ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' + return response @reporter.step("Delete object versions S3") - @report_error def delete_object_versions(self, bucket: str, object_versions: list) -> dict: # Build deletion list in S3 format delete_list = { @@ -434,21 +499,26 @@ class Boto3ClientWrapper(S3ClientWrapper): ] } params = {"Bucket": bucket, "Delete": delete_list} - response = self.boto3_client.delete_objects(**params) - log_command_execution(self.s3gate_endpoint, "S3 Delete objects result", response, params) - return response + return self._exec_request( + self.boto3_client.delete_objects, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Delete object versions S3 without delete markers") - @report_error def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: # Delete objects without creating delete markers for object_version in object_versions: params = {"Bucket": bucket, "Key": object_version["Key"], "VersionId": object_version["VersionId"]} - response = self.boto3_client.delete_object(**params) - log_command_execution(self.s3gate_endpoint, "S3 Delete object result", response, params) + self._exec_request( + self.boto3_client.delete_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put object ACL") - @report_error def put_object_acl( self, bucket: str, @@ -457,21 +527,27 @@ class Boto3ClientWrapper(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> list: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.put_object_acl(**params) - log_command_execution(self.s3gate_endpoint, "S3 put object ACL", response, params) + params = self._convert_to_s3_params(locals()) + 
response = self._exec_request( + self.boto3_client.put_object_acl, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("Grants") @reporter.step("Get object ACL") - @report_error def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.get_object_acl(**params) - log_command_execution(self.s3gate_endpoint, "S3 ACL objects result", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.get_object_acl, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("Grants") @reporter.step("Copy object S3") - @report_error def copy_object( self, source_bucket: str, @@ -486,17 +562,22 @@ class Boto3ClientWrapper(S3ClientWrapper): ) -> str: if bucket is None: bucket = source_bucket + if key is None: key = string_utils.unique_name("copy-object-") - copy_source = f"{source_bucket}/{source_key}" - params = self._convert_to_s3_params(locals().items(), exclude=["self", "source_bucket", "source_key"]) - response = self.boto3_client.copy_object(**params) - log_command_execution(self.s3gate_endpoint, "S3 Copy objects result", response, params) + copy_source = f"{source_bucket}/{source_key}" + params = self._convert_to_s3_params(locals(), exclude=["source_bucket", "source_key"]) + + self._exec_request( + self.boto3_client.copy_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return key @reporter.step("Get object S3") - @report_error def get_object( self, bucket: str, @@ -509,12 +590,15 @@ class Boto3ClientWrapper(S3ClientWrapper): if object_range: range_str = f"bytes={object_range[0]}-{object_range[1]}" - params = self._convert_to_s3_params( - {**locals(), **{"Range": range_str}}.items(), - exclude=["self", "object_range", "full_output", "range_str"], + params = locals() + params.update({"Range": 
f"bytes={object_range[0]}-{object_range[1]}"} if object_range else {}) + params = self._convert_to_s3_params(params, exclude=["object_range", "full_output", "range_str"]) + response = self._exec_request( + self.boto3_client.get_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, ) - response = self.boto3_client.get_object(**params) - log_command_execution(self.s3gate_endpoint, "S3 Get objects result", response, params) if full_output: return response @@ -528,78 +612,93 @@ class Boto3ClientWrapper(S3ClientWrapper): return test_file @reporter.step("Create multipart upload S3") - @report_error def create_multipart_upload(self, bucket: str, key: str) -> str: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.create_multipart_upload(**params) - log_command_execution(self.s3gate_endpoint, "S3 Created multipart upload", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.create_multipart_upload, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" - return response["UploadId"] @reporter.step("List multipart uploads S3") - @report_error def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: - response = self.boto3_client.list_multipart_uploads(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 List multipart upload", response, {"Bucket": bucket}) - + response = self._exec_request( + self.boto3_client.list_multipart_uploads, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("Uploads") @reporter.step("Abort multipart upload S3") - @report_error def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.abort_multipart_upload(**params) - 
log_command_execution(self.s3gate_endpoint, "S3 Abort multipart upload", response, params) + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.abort_multipart_upload, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Upload part S3") - @report_error def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: with open(filepath, "rb") as put_file: body = put_file.read() - params = self._convert_to_s3_params(locals().items(), exclude=["self", "put_file", "part_num", "filepath", "body"]) + params = self._convert_to_s3_params(locals(), exclude=["put_file", "part_num", "filepath"]) params["PartNumber"] = part_num - response = self.boto3_client.upload_part(Body=body, **params) - log_command_execution(self.s3gate_endpoint, "S3 Upload part", response, params) - assert response.get("ETag"), f"Expected ETag in response:\n{response}" + response = self._exec_request( + self.boto3_client.upload_part, + params, + body=filepath, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + assert response.get("ETag"), f"Expected ETag in response:\n{response}" return response["ETag"] @reporter.step("Upload copy part S3") - @report_error def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: - params = self._convert_to_s3_params(locals().items(), exclude=["self", "put_file", "part_num", "filepath"]) + params = self._convert_to_s3_params(locals(), exclude=["put_file", "part_num", "filepath"]) params["PartNumber"] = part_num - response = self.boto3_client.upload_part_copy(**params) - log_command_execution(self.s3gate_endpoint, "S3 Upload copy part", response, params) + response = self._exec_request( + self.boto3_client.upload_part_copy, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" - return 
response["CopyPartResult"]["ETag"] @reporter.step("List parts S3") - @report_error def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.list_parts(**params) - log_command_execution(self.s3gate_endpoint, "S3 List part", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.list_parts, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) assert response.get("Parts"), f"Expected Parts in response:\n{response}" - return response["Parts"] @reporter.step("Complete multipart upload S3") - @report_error def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] - params = self._convert_to_s3_params(locals().items(), exclude=["self", "parts"]) + params = self._convert_to_s3_params(locals(), exclude=["parts"]) params["MultipartUpload"] = {"Parts": parts} - response = self.boto3_client.complete_multipart_upload(**params) - log_command_execution(self.s3gate_endpoint, "S3 Complete multipart upload", response, params) - - return response + return self._exec_request( + self.boto3_client.complete_multipart_upload, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put object retention") - @report_error def put_object_retention( self, bucket: str, @@ -608,12 +707,15 @@ class Boto3ClientWrapper(S3ClientWrapper): version_id: Optional[str] = None, bypass_governance_retention: Optional[bool] = None, ) -> None: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.put_object_retention(**params) - log_command_execution(self.s3gate_endpoint, "S3 Put object retention ", response, params) + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.put_object_retention, + params, + 
endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put object legal hold") - @report_error def put_object_legal_hold( self, bucket: str, @@ -622,36 +724,48 @@ class Boto3ClientWrapper(S3ClientWrapper): version_id: Optional[str] = None, ) -> None: legal_hold = {"Status": legal_hold_status} - params = self._convert_to_s3_params(locals().items(), exclude=["self", "legal_hold_status"]) - response = self.boto3_client.put_object_legal_hold(**params) - log_command_execution(self.s3gate_endpoint, "S3 Put object legal hold ", response, params) + params = self._convert_to_s3_params(locals(), exclude=["legal_hold_status"]) + self._exec_request( + self.boto3_client.put_object_legal_hold, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put object tagging") - @report_error def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} - params = self._convert_to_s3_params(locals().items(), exclude=["self", "tags"]) - response = self.boto3_client.put_object_tagging(**params) - log_command_execution(self.s3gate_endpoint, "S3 Put object tagging", response, params) + params = self._convert_to_s3_params(locals(), exclude=["tags"]) + self._exec_request( + self.boto3_client.put_object_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get object tagging") - @report_error def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.get_object_tagging(**params) - log_command_execution(self.s3gate_endpoint, "S3 Get object tagging", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.get_object_tagging, + params, + endpoint=self.s3gate_endpoint, + 
profile=self.profile, + ) return response.get("TagSet") @reporter.step("Delete object tagging") - @report_error def delete_object_tagging(self, bucket: str, key: str) -> None: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.delete_object_tagging(**params) - log_command_execution(self.s3gate_endpoint, "S3 Delete object tagging", response, params) + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.delete_object_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get object attributes") - @report_error def get_object_attributes( self, bucket: str, @@ -666,7 +780,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return {} @reporter.step("Sync directory S3") - @report_error def sync( self, bucket: str, @@ -677,7 +790,6 @@ class Boto3ClientWrapper(S3ClientWrapper): raise NotImplementedError("Sync is not supported for boto3 client") @reporter.step("CP directory S3") - @report_error def cp( self, bucket: str, @@ -693,36 +805,47 @@ class Boto3ClientWrapper(S3ClientWrapper): # Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.) 
@reporter.step("Adds the specified user to the specified group") - @report_error def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.add_user_to_group(**params) - log_command_execution(self.iam_endpoint, "IAM Add User to Group", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.add_user_to_group, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Attaches the specified managed policy to the specified IAM group") - @report_error def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.attach_group_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Attach Group Policy", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.attach_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Attaches the specified managed policy to the specified user") - @report_error def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.attach_user_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Attach User Policy", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.attach_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") - @report_error def iam_create_access_key(self, user_name: str) -> dict: - response = 
self.boto3_iam_client.create_access_key(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM Create Access Key", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.create_access_key, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) access_key_id = response["AccessKey"].get("AccessKeyId") secret_access_key = response["AccessKey"].get("SecretAccessKey") @@ -732,10 +855,13 @@ class Boto3ClientWrapper(S3ClientWrapper): return access_key_id, secret_access_key @reporter.step("Creates a new group") - @report_error def iam_create_group(self, group_name: str) -> dict: - response = self.boto3_iam_client.create_group(GroupName=group_name) - log_command_execution(self.iam_endpoint, "IAM Create Group", response, {"GroupName": group_name}) + response = self._exec_request( + self.boto3_iam_client.create_group, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Group"), f"Expected Group in response:\n{response}" assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" @@ -743,12 +869,17 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Creates a new managed policy for your AWS account") - @report_error def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: - params = self._convert_to_s3_params(locals().items()) + params = self._convert_to_s3_params(locals()) params["PolicyDocument"] = json.dumps(policy_document) - response = self.boto3_iam_client.create_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Create Policy", response, params) + response = self._exec_request( + self.boto3_iam_client.create_policy, + params, + # Overriding option for AWS CLI + policy_document=policy_document, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Policy"), f"Expected Policy in 
response:\n{response}" assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" @@ -756,10 +887,13 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Creates a new IAM user for your AWS account") - @report_error def iam_create_user(self, user_name: str) -> dict: - response = self.boto3_iam_client.create_user(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM Create User", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.create_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("User"), f"Expected User in response:\n{response}" assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" @@ -767,89 +901,115 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Deletes the access key pair associated with the specified IAM user") - @report_error def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.delete_access_key(**params) - log_command_execution(self.iam_endpoint, "IAM Delete Access Key", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.delete_access_key, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Deletes the specified IAM group") - @report_error def iam_delete_group(self, group_name: str) -> dict: - response = self.boto3_iam_client.delete_group(GroupName=group_name) - log_command_execution(self.iam_endpoint, "IAM Delete Group", response, {"GroupName": group_name}) - return response + return self._exec_request( + self.boto3_iam_client.delete_group, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Deletes 
the specified inline policy that is embedded in the specified IAM group") - @report_error def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.delete_group_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Delete Group Policy", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.delete_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Deletes the specified managed policy") - @report_error def iam_delete_policy(self, policy_arn: str) -> dict: - response = self.boto3_iam_client.delete_policy(PolicyArn=policy_arn) - log_command_execution(self.iam_endpoint, "IAM Delete Policy", response, {"PolicyArn": policy_arn}) - return response + return self._exec_request( + self.boto3_iam_client.delete_policy, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Deletes the specified IAM user") - @report_error def iam_delete_user(self, user_name: str) -> dict: - response = self.boto3_iam_client.delete_user(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM Delete User", response, {"UserName": user_name}) - return response + return self._exec_request( + self.boto3_iam_client.delete_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") - @report_error def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.delete_user_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Delete User Policy", response, params) - return response + params = self._convert_to_s3_params(locals()) + return 
self._exec_request( + self.boto3_iam_client.delete_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Removes the specified managed policy from the specified IAM group") - @report_error def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.detach_group_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Detach Group Policy", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.detach_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Removes the specified managed policy from the specified user") - @report_error def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.detach_user_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Detach User Policy", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.detach_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Returns a list of IAM users that are in the specified IAM group") - @report_error def iam_get_group(self, group_name: str) -> dict: - response = self.boto3_iam_client.get_group(GroupName=group_name) - log_command_execution(self.iam_endpoint, "IAM Get Group", response, {"GroupName": group_name}) + response = self._exec_request( + self.boto3_iam_client.get_group, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}" return response @reporter.step("Retrieves the 
specified inline policy document that is embedded in the specified IAM group") - @report_error def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.get_group_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Get Group Policy", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.get_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Retrieves information about the specified managed policy") - @report_error def iam_get_policy(self, policy_arn: str) -> dict: - response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn) - log_command_execution(self.iam_endpoint, "IAM Get Policy", response, {"PolicyArn": policy_arn}) + response = self._exec_request( + self.boto3_iam_client.get_policy, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Policy"), f"Expected Policy in response:\n{response}" assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" @@ -857,11 +1017,14 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Retrieves information about the specified version of the specified managed policy") - @report_error def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.get_policy_version(**params) - log_command_execution(self.iam_endpoint, "IAM Get Policy Version", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.get_policy_version, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}" assert 
response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}" @@ -869,10 +1032,13 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Retrieves information about the specified IAM user") - @report_error def iam_get_user(self, user_name: str) -> dict: - response = self.boto3_iam_client.get_user(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM Get User", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.get_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("User"), f"Expected User in response:\n{response}" assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" @@ -880,42 +1046,56 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") - @report_error def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.get_user_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Get User Policy", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.get_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("UserName"), f"Expected UserName in response:\n{response}" return response @reporter.step("Returns information about the access key IDs associated with the specified IAM user") - @report_error def iam_list_access_keys(self, user_name: str) -> dict: - response = self.boto3_iam_client.list_access_keys(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM List Access Keys", response, {"UserName": user_name}) - return response + return self._exec_request( + 
self.boto3_iam_client.list_access_keys, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Lists all managed policies that are attached to the specified IAM group") - @report_error def iam_list_attached_group_policies(self, group_name: str) -> dict: - response = self.boto3_iam_client.list_attached_group_policies(GroupName=group_name) - log_command_execution(self.iam_endpoint, "IAM List Attached Group Policies", response, {"GroupName": group_name}) + response = self._exec_request( + self.boto3_iam_client.list_attached_group_policies, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" return response @reporter.step("Lists all managed policies that are attached to the specified IAM user") - @report_error def iam_list_attached_user_policies(self, user_name: str) -> dict: - response = self.boto3_iam_client.list_attached_user_policies(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM List Attached User Policies", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.list_attached_user_policies, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" return response @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") - @report_error def iam_list_entities_for_policy(self, policy_arn: str) -> dict: - response = self.boto3_iam_client.list_entities_for_policy(PolicyArn=policy_arn) - log_command_execution(self.iam_endpoint, "IAM List Entities For Policy", response, {"PolicyArn": policy_arn}) + response = self._exec_request( + self.boto3_iam_client.list_entities_for_policy, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + 
profile=self.profile, + ) assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" @@ -923,125 +1103,165 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") - @report_error def iam_list_group_policies(self, group_name: str) -> dict: - response = self.boto3_iam_client.list_group_policies(GroupName=group_name) - log_command_execution(self.iam_endpoint, "IAM List Group Policies", response, {"GroupName": group_name}) + response = self._exec_request( + self.boto3_iam_client.list_group_policies, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" return response @reporter.step("Lists the IAM groups") - @report_error def iam_list_groups(self) -> dict: - response = self.boto3_iam_client.list_groups() - log_command_execution(self.iam_endpoint, "IAM List Groups", response) + response = self._exec_request( + self.boto3_iam_client.list_groups, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Groups"), f"Expected Groups in response:\n{response}" return response @reporter.step("Lists the IAM groups that the specified IAM user belongs to") - @report_error def iam_list_groups_for_user(self, user_name: str) -> dict: - response = self.boto3_iam_client.list_groups_for_user(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM List Groups For User", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.list_groups_for_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Groups"), f"Expected Groups in response:\n{response}" return response @reporter.step("Lists all the managed policies that 
are available in your AWS account") - @report_error def iam_list_policies(self) -> dict: - response = self.boto3_iam_client.list_policies() - log_command_execution(self.iam_endpoint, "IAM List Policies", response) + response = self._exec_request( + self.boto3_iam_client.list_policies, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Policies"), f"Expected Policies in response:\n{response}" return response @reporter.step("Lists information about the versions of the specified managed policy") - @report_error def iam_list_policy_versions(self, policy_arn: str) -> dict: - response = self.boto3_iam_client.list_policy_versions(PolicyArn=policy_arn) - log_command_execution(self.iam_endpoint, "IAM List Policy Versions", response, {"PolicyArn": policy_arn}) + response = self._exec_request( + self.boto3_iam_client.list_policy_versions, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Versions"), f"Expected Versions in response:\n{response}" return response @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") - @report_error def iam_list_user_policies(self, user_name: str) -> dict: - response = self.boto3_iam_client.list_user_policies(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM List User Policies", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.list_user_policies, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" return response @reporter.step("Lists the IAM users") - @report_error def iam_list_users(self) -> dict: - response = self.boto3_iam_client.list_users() - log_command_execution(self.iam_endpoint, "IAM List Users", response) + response = self._exec_request( + self.boto3_iam_client.list_users, + endpoint=self.iam_endpoint, + 
profile=self.profile, + ) assert response.get("Users"), f"Expected Users in response:\n{response}" return response @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") - @report_error def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: - params = self._convert_to_s3_params(locals().items()) + params = self._convert_to_s3_params(locals()) params["PolicyDocument"] = json.dumps(policy_document) - response = self.boto3_iam_client.put_group_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Put Group Policy", response, params) + response = self._exec_request( + self.boto3_iam_client.put_group_policy, + params, + # Overriding option for AWS CLI + policy_document=policy_document, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") - @report_error def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: - params = self._convert_to_s3_params(locals().items()) + params = self._convert_to_s3_params(locals()) params["PolicyDocument"] = json.dumps(policy_document) - response = self.boto3_iam_client.put_user_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Put User Policy", response, params) + response = self._exec_request( + self.boto3_iam_client.put_user_policy, + params, + # Overriding option for AWS CLI + policy_document=policy_document, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Removes the specified user from the specified group") - @report_error def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.remove_user_from_group(**params) - 
log_command_execution(self.iam_endpoint, "IAM Remove User From Group", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.remove_user_from_group, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Updates the name and/or the path of the specified IAM group") - @report_error def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict: params = {"GroupName": group_name, "NewGroupName": new_name, "NewPath": "/"} - response = self.boto3_iam_client.update_group(**params) - log_command_execution(self.iam_endpoint, "IAM Update Group", response, params) - return response + return self._exec_request( + self.boto3_iam_client.update_group, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Updates the name and/or the path of the specified IAM user") - @report_error def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict: params = {"UserName": user_name, "NewUserName": new_name, "NewPath": "/"} - response = self.boto3_iam_client.update_user(**params) - log_command_execution(self.iam_endpoint, "IAM Update User", response, params) - return response + return self._exec_request( + self.boto3_iam_client.update_user, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Adds one or more tags to an IAM user") - @report_error def iam_tag_user(self, user_name: str, tags: list) -> dict: - params = self._convert_to_s3_params(locals().items()) + params = self._convert_to_s3_params(locals()) params["Tags"] = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - response = self.boto3_iam_client.tag_user(**params) - log_command_execution(self.iam_endpoint, "IAM Tag User", response, params) - return response + return self._exec_request( + self.boto3_iam_client.tag_user, + params, + endpoint=self.iam_endpoint, + 
profile=self.profile, + ) @reporter.step("List tags of IAM user") - @report_error def iam_list_user_tags(self, user_name: str) -> dict: - response = self.boto3_iam_client.list_user_tags(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM List User Tags", response, {"UserName": user_name}) - return response + return self._exec_request( + self.boto3_iam_client.list_user_tags, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Removes the specified tags from the user") - @report_error def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.untag_user(**params) - log_command_execution(self.iam_endpoint, "IAM Untag User", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.untag_user, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 8e019ea..32e4346 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -9,13 +9,12 @@ import csv import json import logging import re -import subprocess import sys from contextlib import suppress from datetime import datetime from io import StringIO from textwrap import shorten -from typing import Dict, List, Optional, TypedDict, Union +from typing import Any, Optional, Union import pexpect @@ -75,22 +74,75 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date reporter.attach(command_attachment, "Command execution") -def log_command_execution(url: str, cmd: str, output: Union[str, dict], params: Optional[dict] = None) -> None: +def log_command_execution(cmd: str, output: Union[str, dict], params: Optional[dict] = None, **kwargs) -> None: logger.info(f"{cmd}: {output}") - with suppress(Exception): 
- json_output = json.dumps(output, indent=4, sort_keys=True) - output = json_output + if not params: + params = {} + + output_params = params try: - json_params = json.dumps(params, indent=4, sort_keys=True) + json_params = json.dumps(params, indent=4, sort_keys=True, default=str) except TypeError as err: logger.warning(f"Failed to serialize '{cmd}' request parameters:\n{params}\nException: {err}") else: - params = json_params + output_params = json_params - command_attachment = f"COMMAND: '{cmd}'\n" f"URL: {url}\n" f"PARAMS:\n{params}\n" f"OUTPUT:\n{output}\n" - reporter.attach(command_attachment, "Command execution") + output = json.dumps(output, indent=4, sort_keys=True, default=str) + + command_execution = f"COMMAND: '{cmd}'\n" f"URL: {kwargs['endpoint']}\n" f"PARAMS:\n{output_params}\n" f"OUTPUT:\n{output}\n" + aws_command = _convert_request_to_aws_cli_command(cmd, params, **kwargs) + + reporter.attach(command_execution, "Command execution") + reporter.attach(aws_command, "AWS CLI Command") + + +def _convert_request_to_aws_cli_command(command: str, params: dict, **kwargs) -> str: + overriden_names = [_convert_json_name_to_aws_cli(name) for name in kwargs.keys()] + command = command.replace("_", "-") + options = [] + + for name, value in params.items(): + name = _convert_json_name_to_aws_cli(name) + + # To override parameters for AWS CLI + if name in overriden_names: + continue + + if option := _create_option(name, value): + options.append(option) + + for name, value in kwargs.items(): + name = _convert_json_name_to_aws_cli(name) + if option := _create_option(name, value): + options.append(option) + + options = " ".join(options) + api = "s3api" if "s3" in kwargs["endpoint"] else "iam" + return f"aws --no-verify-ssl --no-paginate {api} {command} {options}" + + +def _convert_json_name_to_aws_cli(name: str) -> str: + specific_names = {"CORSConfiguration": "cors-configuration"} + + if aws_cli_name := specific_names.get(name): + return aws_cli_name + return 
re.sub(r"([a-z])([A-Z])", r"\1 \2", name).lower().replace(" ", "-").replace("_", "-") + + +def _create_option(name: str, value: Any) -> str | None: + if isinstance(value, bool) and value: + return f"--{name}" + + if isinstance(value, dict): + value = json.dumps(value, indent=4, sort_keys=True, default=str) + return f"--{name} '{value}'" + + if value: + return f"--{name} {value}" + + return None def parse_netmap_output(output: str) -> list[NodeNetmapInfo]: From 6f1baf3cf6384f7adeb300bd9d6c9406f4abdcf3 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Fri, 1 Nov 2024 15:50:17 +0300 Subject: [PATCH 23/81] [#312] update morph remove_nodes --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index 7228692..2958884 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -341,7 +341,6 @@ class FrostfsAdmMorph(CliCommand): return self._execute( f"morph remove-nodes {' '.join(node_netmap_keys)}", **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, - **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, ) def add_rule( From ea4094051413cf49b74277794a0e3b99221d05f6 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Tue, 5 Nov 2024 12:37:56 +0300 Subject: [PATCH 24/81] [#313] update force_new_epoch --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index 2958884..f3e0137 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -122,7 +122,9 @@ class FrostfsAdmMorph(CliCommand): **{param: param_value for param, param_value in locals().items() if 
param not in ["self"]}, ) - def force_new_epoch(self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult: + def force_new_epoch( + self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None + ) -> CommandResult: """Create new FrostFS epoch event in the side chain. Args: From 55d8ee5da0cc7113fe864ebfadd028234891bf98 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 8 Nov 2024 15:46:02 +0300 Subject: [PATCH 25/81] [#315] Add http client Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/http/__init__.py | 0 src/frostfs_testlib/http/http_client.py | 95 +++++++++++++++++++++++++ 2 files changed, 95 insertions(+) create mode 100644 src/frostfs_testlib/http/__init__.py create mode 100644 src/frostfs_testlib/http/http_client.py diff --git a/src/frostfs_testlib/http/__init__.py b/src/frostfs_testlib/http/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py new file mode 100644 index 0000000..261b2a6 --- /dev/null +++ b/src/frostfs_testlib/http/http_client.py @@ -0,0 +1,95 @@ +import json +import logging +import logging.config + +import httpx + +from frostfs_testlib import reporter + +timeout = httpx.Timeout(60, read=150) +LOGGING_CONFIG = { + "disable_existing_loggers": False, + "version": 1, + "handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}}, + "formatters": { + "http": { + "format": "%(levelname)s [%(asctime)s] %(name)s - %(message)s", + "datefmt": "%Y-%m-%d %H:%M:%S", + } + }, + "loggers": { + "httpx": { + "handlers": ["default"], + "level": "DEBUG", + }, + "httpcore": { + "handlers": ["default"], + "level": "ERROR", + }, + }, +} + +logging.config.dictConfig(LOGGING_CONFIG) +logger = logging.getLogger("NeoLogger") + + +class HttpClient: + @reporter.step("Send {method} request to {url}") + def send(self, 
method: str, url: str, expected_status_code: int = None, **kwargs: dict) -> httpx.Response: + transport = httpx.HTTPTransport(verify=False, retries=5) + client = httpx.Client(timeout=timeout, transport=transport) + response = client.request(method, url, **kwargs) + + self._attach_response(response) + logger.info(f"Response: {response.status_code} => {response.text}") + + if expected_status_code: + assert response.status_code == expected_status_code, ( + f"Got {response.status_code} response code" f" while {expected_status_code} expected" + ) + + return response + + def _attach_response(self, response: httpx.Response): + request = response.request + + try: + request_headers = json.dumps(dict(request.headers), indent=4) + except json.JSONDecodeError: + request_headers = str(request.headers) + + try: + request_body = request.read() + try: + request_body = request_body.decode("utf-8") + except UnicodeDecodeError as e: + request_body = f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}" + except Exception as e: + request_body = f"Error reading request body: {str(e)}" + + request_body = "" if request_body is None else request_body + + try: + response_headers = json.dumps(dict(response.headers), indent=4) + except json.JSONDecodeError: + response_headers = str(response.headers) + + report = ( + f"Method: {request.method}\n\n" + f"URL: {request.url}\n\n" + f"Request Headers: {request_headers}\n\n" + f"Request Body: {request_body}\n\n" + f"Response Status Code: {response.status_code}\n\n" + f"Response Headers: {response_headers}\n\n" + f"Response Body: {response.text}\n\n" + ) + curl_request = self._create_curl_request(request.url, request.method, request.headers, request_body) + + reporter.attach(report, "Requests Info") + reporter.attach(curl_request, "CURL") + + def _create_curl_request(self, url: str, method: str, headers: httpx.Headers, data: str) -> str: + headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items()) + data 
= f" -d '{data}'" if data else "" + # Option -k means no verify SSL + return f"curl {url} -X {method} {headers}{data} -k" From 95b32a036a8043191f3cec6dd249ee95fa1aa3a6 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Tue, 12 Nov 2024 12:28:10 +0300 Subject: [PATCH 26/81] [#316] Extend parallel exception message output Signed-off-by: a.berezin --- src/frostfs_testlib/testing/parallel.py | 38 ++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/testing/parallel.py b/src/frostfs_testlib/testing/parallel.py index 0549e61..6c4f6e0 100644 --- a/src/frostfs_testlib/testing/parallel.py +++ b/src/frostfs_testlib/testing/parallel.py @@ -1,4 +1,5 @@ import itertools +import traceback from concurrent.futures import Future, ThreadPoolExecutor from contextlib import contextmanager from typing import Callable, Collection, Optional, Union @@ -55,7 +56,42 @@ def parallel( # Check for exceptions exceptions = [future.exception() for future in futures if future.exception()] if exceptions: - message = "\n".join([str(e) for e in exceptions]) + # Prettify exception in parallel with all underlying stack traces + # For example, we had 3 RuntimeError exceptions during parallel. 
This format will give us something like + # + # RuntimeError: The following exceptions occured during parallel run: + # 1) Exception one text + # 2) Exception two text + # 3) Exception three text + # TRACES: + # ==== 1 ==== + # Traceback (most recent call last): + # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run + # result = self.fn(*self.args, **self.kwargs) + # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service + # raise RuntimeError(f"Exception one text") + # RuntimeError: Exception one text + # + # ==== 2 ==== + # Traceback (most recent call last): + # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run + # result = self.fn(*self.args, **self.kwargs) + # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service + # raise RuntimeError(f"Exception two text") + # RuntimeError: Exception two text + # + # ==== 3 ==== + # Traceback (most recent call last): + # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run + # result = self.fn(*self.args, **self.kwargs) + # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service + # raise RuntimeError(f"Exception three text") + # RuntimeError: Exception three text + short_summary = "\n".join([f"{i}) {str(e)}" for i, e in enumerate(exceptions, 1)]) + stack_traces = "\n".join( + [f"==== {i} ====\n{''.join(traceback.TracebackException.from_exception(e).format())}" for i, e in enumerate(exceptions, 1)] + ) + message = f"{short_summary}\nTRACES:\n{stack_traces}" raise RuntimeError(f"The following exceptions occured during parallel run:\n{message}") return futures From 2a90ec74ff70934d65fa13e78c348afda3b195c2 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Tue, 12 Nov 2024 16:01:12 +0300 Subject: [PATCH 27/81] [#317] update morph rule chain --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 4 ++++ 1 
file changed, 4 insertions(+) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index f3e0137..5e39cf4 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -353,6 +353,7 @@ class FrostfsAdmMorph(CliCommand): rule: Optional[list[str]] = None, path: Optional[str] = None, chain_id_hex: Optional[bool] = None, + chain_name: Optional[str] = None, wallet: Optional[str] = None, address: Optional[str] = None, timeout: Optional[str] = None, @@ -383,6 +384,7 @@ class FrostfsAdmMorph(CliCommand): target_name: str, target_type: str, chain_id_hex: Optional[bool] = None, + chain_name: Optional[str] = None, wallet: Optional[str] = None, address: Optional[str] = None, timeout: Optional[str] = None, @@ -410,6 +412,7 @@ class FrostfsAdmMorph(CliCommand): target_type: str, target_name: Optional[str] = None, rpc_endpoint: Optional[str] = None, + chain_name: Optional[str] = None, wallet: Optional[str] = None, address: Optional[str] = None, timeout: Optional[str] = None, @@ -436,6 +439,7 @@ class FrostfsAdmMorph(CliCommand): target_name: str, target_type: str, all: Optional[bool] = None, + chain_name: Optional[str] = None, chain_id_hex: Optional[bool] = None, wallet: Optional[str] = None, address: Optional[str] = None, From 47bc11835bb7869e2b87e761e432e923fcd90343 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 13 Nov 2024 10:10:35 +0300 Subject: [PATCH 28/81] [#318] Add tombstone expiration test Signed-off-by: a.berezin --- src/frostfs_testlib/hosting/docker_host.py | 3 ++ src/frostfs_testlib/hosting/interfaces.py | 11 +++++++ src/frostfs_testlib/resources/common.py | 1 + src/frostfs_testlib/storage/cluster.py | 6 ++-- .../controllers/cluster_state_controller.py | 21 ++++++++++++-- .../state_managers/config_state_manager.py | 29 ++++++++++++++----- .../storage/dataclasses/node_base.py | 12 ++++---- 7 files changed, 63 insertions(+), 20 deletions(-) diff --git 
a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 5110e63..01dc6b5 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -164,6 +164,9 @@ class DockerHost(Host): return volume_path + def send_signal_to_service(self, service_name: str, signal: str) -> None: + raise NotImplementedError("Not implemented for docker") + def delete_metabase(self, service_name: str) -> None: raise NotImplementedError("Not implemented for docker") diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index b84326a..6d1e5da 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -117,6 +117,17 @@ class Host(ABC): service_name: Name of the service to stop. """ + @abstractmethod + def send_signal_to_service(self, service_name: str, signal: str) -> None: + """Send signal to service with specified name using kill - + + The service must be hosted on this host. + + Args: + service_name: Name of the service to send the signal to. + signal: Signal name. See kill -l for all names. + """ + @abstractmethod + def mask_service(self, service_name: str) -> None: + """Prevent the service from start by any activity by masking it. 
diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index 1c93b12..53bcfaa 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -53,3 +53,4 @@ HOSTING_CONFIG_FILE = os.getenv( ) MORE_LOG = os.getenv("MORE_LOG", "1") +EXPIRATION_EPOCH_ATTRIBUTE = "__SYSTEM__EXPIRATION_EPOCH" diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 9fcc4c9..3ec4922 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -11,10 +11,10 @@ from frostfs_testlib.storage import get_service_registry from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode +from frostfs_testlib.storage.dataclasses.metrics import Metrics from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.service_registry import ServiceRegistry -from frostfs_testlib.storage.dataclasses.metrics import Metrics class ClusterNode: @@ -91,10 +91,10 @@ class ClusterNode: config_str = yaml.dump(new_config) shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}") - def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml: + def config(self, service_type: ServiceClass) -> ServiceConfigurationYml: return self.service(service_type).config - def service(self, service_type: type[ServiceClass]) -> ServiceClass: + def service(self, service_type: ServiceClass) -> ServiceClass: """ Get a service cluster node of specified type. 
diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 53098b1..5080d40 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -172,6 +172,15 @@ class ClusterStateController: if service_type == StorageNode: self.wait_after_storage_startup() + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Send sighup to all {service_type} services") + def sighup_services_of_type(self, service_type: type[ServiceClass]): + services = self.cluster.services(service_type) + parallel([service.send_signal_to_service for service in services], signal="SIGHUP") + + if service_type == StorageNode: + self.wait_after_storage_startup() + @wait_for_success(600, 60) def wait_s3gate(self, s3gate: S3Gate): with reporter.step(f"Wait for {s3gate} reconnection"): @@ -206,21 +215,27 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Stop {service_type} service on {node}") - def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True): + def stop_service_of_type(self, node: ClusterNode, service_type: ServiceClass, mask: bool = True): service = node.service(service_type) service.stop_service(mask) self.stopped_services.add(service) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Send sighup to {service_type} service on {node}") + def sighup_service_of_type(self, node: ClusterNode, service_type: ServiceClass): + service = node.service(service_type) + service.send_signal_to_service("SIGHUP") + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Start {service_type} service on {node}") - def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]): + def start_service_of_type(self, node: ClusterNode, service_type: 
ServiceClass): service = node.service(service_type) service.start_service() self.stopped_services.discard(service) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Start all stopped {service_type} services") - def start_stopped_services_of_type(self, service_type: type[ServiceClass]): + def start_stopped_services_of_type(self, service_type: ServiceClass): stopped_svc = self._get_stopped_by_type(service_type) if not stopped_svc: return diff --git a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py index 66f72d6..f0b2a21 100644 --- a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py +++ b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py @@ -14,14 +14,19 @@ class ConfigStateManager(StateManager): self.cluster = self.csc.cluster @reporter.step("Change configuration for {service_type} on all nodes") - def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any]): + def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any], sighup: bool = False): services = self.cluster.services(service_type) nodes = self.cluster.nodes(services) self.services_with_changed_config.update([(node, service_type) for node in nodes]) - self.csc.stop_services_of_type(service_type) + if not sighup: + self.csc.stop_services_of_type(service_type) + parallel([node.config(service_type).set for node in nodes], values=values) - self.csc.start_services_of_type(service_type) + if not sighup: + self.csc.start_services_of_type(service_type) + else: + self.csc.sighup_services_of_type(service_type) @reporter.step("Change configuration for {service_type} on {node}") def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]): @@ -32,18 +37,26 @@ class ConfigStateManager(StateManager): self.csc.start_service_of_type(node, service_type) 
@reporter.step("Revert all configuration changes") - def revert_all(self): + def revert_all(self, sighup: bool = False): if not self.services_with_changed_config: return - parallel(self._revert_svc, self.services_with_changed_config) + parallel(self._revert_svc, self.services_with_changed_config, sighup) self.services_with_changed_config.clear() - self.csc.start_all_stopped_services() + if not sighup: + self.csc.start_all_stopped_services() # TODO: parallel can't have multiple parallel_items :( @reporter.step("Revert all configuration {node_and_service}") - def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]): + def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass], sighup: bool = False): node, service_type = node_and_service - self.csc.stop_service_of_type(node, service_type) + service = node.service(service_type) + + if not sighup: + self.csc.stop_service_of_type(node, service_type) + node.config(service_type).revert() + + if sighup: + service.send_signal_to_service("SIGHUP") diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 8291345..180877d 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -65,6 +65,10 @@ class NodeBase(HumanReadableABC): with reporter.step(f"Start {self.name} service on {self.host.config.address}"): self.host.start_service(self.name) + def send_signal_to_service(self, signal: str): + with reporter.step(f"Send -{signal} signal to {self.name} service on {self.host.config.address}"): + self.host.send_signal_to_service(self.name, signal) + @abstractmethod def service_healthcheck(self) -> bool: """Service healthcheck.""" @@ -185,9 +189,7 @@ class NodeBase(HumanReadableABC): if attribute_name not in config.attributes: if default_attribute_name is None: - raise RuntimeError( - f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't 
set either" - ) + raise RuntimeError(f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either") return config.attributes[default_attribute_name] @@ -197,9 +199,7 @@ class NodeBase(HumanReadableABC): return self.host.get_service_config(self.name) def get_service_uptime(self, service: str) -> datetime: - result = self.host.get_shell().exec( - f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2" - ) + result = self.host.get_shell().exec(f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2") start_time = parser.parse(result.stdout.strip()) current_time = datetime.now(tz=timezone.utc) active_time = current_time - start_time From f24bfc06fd04f0fc195135315d1d3a9c9828fcf8 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 13 Nov 2024 17:46:03 +0300 Subject: [PATCH 29/81] [#319] Add cached fixture feature Signed-off-by: a.berezin --- src/frostfs_testlib/resources/optionals.py | 11 +++--- src/frostfs_testlib/testing/test_control.py | 39 +++++++++++++++++++++ 2 files changed, 44 insertions(+), 6 deletions(-) diff --git a/src/frostfs_testlib/resources/optionals.py b/src/frostfs_testlib/resources/optionals.py index 2a7ff22..6caf158 100644 --- a/src/frostfs_testlib/resources/optionals.py +++ b/src/frostfs_testlib/resources/optionals.py @@ -16,11 +16,10 @@ OPTIONAL_NODE_UNDER_LOAD = os.getenv("OPTIONAL_NODE_UNDER_LOAD") OPTIONAL_FAILOVER_ENABLED = str_to_bool(os.getenv("OPTIONAL_FAILOVER_ENABLED", "true")) # Set this to True to disable background load. I.E. node which supposed to be stopped will not be actually stopped. -OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool( - os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true") -) +OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true")) # Set this to False for disable autouse fixture like node healthcheck during developing time. 
-OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool( - os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true") -) +OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true")) + +# Use cache for fixtures with @cachec_fixture decorator +OPTIONAL_CACHE_FIXTURES = str_to_bool(os.getenv("OPTIONAL_CACHE_FIXTURES", "false")) diff --git a/src/frostfs_testlib/testing/test_control.py b/src/frostfs_testlib/testing/test_control.py index 4fa6390..bc38208 100644 --- a/src/frostfs_testlib/testing/test_control.py +++ b/src/frostfs_testlib/testing/test_control.py @@ -1,13 +1,16 @@ import inspect import logging +import os from functools import wraps from time import sleep, time from typing import Any +import yaml from _pytest.outcomes import Failed from pytest import fail from frostfs_testlib import reporter +from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.utils.func_utils import format_by_args logger = logging.getLogger("NeoLogger") @@ -128,6 +131,42 @@ def run_optionally(enabled: bool, mock_value: Any = True): return deco +def cached_fixture(enabled: bool): + """ + Decorator to cache fixtures. + MUST be placed after @pytest.fixture and before @allure decorators. + + Args: + enabled: if true, decorated func will be cached. 
+ """ + + def deco(func): + @wraps(func) + def func_impl(*a, **kw): + # TODO: *a and *kw should be parsed to some kind of hashsum and used in filename to prevent cache load from different parameters + cache_file = os.path.join(ASSETS_DIR, f"fixture_cache_{func.__name__}.yml") + + if enabled and os.path.exists(cache_file): + with open(cache_file, "r") as cache_input: + return yaml.load(cache_input, Loader=yaml.Loader) + + result = func(*a, **kw) + + if enabled: + with open(cache_file, "w") as cache_output: + yaml.dump(result, cache_output) + return result + + # TODO: cache yielding fixtures + @wraps(func) + def gen_impl(*a, **kw): + raise NotImplementedError("Not implemented for yielding fixtures") + + return gen_impl if inspect.isgeneratorfunction(func) else func_impl + + return deco + + def wait_for_success( max_wait_time: int = 60, interval: int = 1, From 451de5e07e7ef6dd68e684aaa431839583a82089 Mon Sep 17 00:00:00 2001 From: anurindm Date: Thu, 14 Nov 2024 16:22:06 +0300 Subject: [PATCH 30/81] [#320] Added shards detach function Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/cli/frostfs_cli/shards.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py index 82ea87b..68a2f54 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -241,3 +241,21 @@ class FrostfsCliShards(CliCommand): "control shards evacuation status", **{param: value for param, value in locals().items() if param not in ["self"]}, ) + + def detach(self, endpoint: str, address: Optional[str] = None, id: Optional[str] = None, timeout: Optional[str] = None): + """ + Detach and close the shards + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + id: List of shard IDs in base58 encoding + timeout: Timeout for an operation (default 15s) + + Returns: + Command's result. 
+ """ + return self._execute( + "control shards detach", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) From a1953684b87f8c3d96f95a14ce98f59fdcab657b Mon Sep 17 00:00:00 2001 From: Roman Chernykh Date: Wed, 16 Oct 2024 18:42:42 +0300 Subject: [PATCH 31/81] [#307] added methods for testing MFA --- src/frostfs_testlib/s3/aws_cli_client.py | 87 ++++++++++++++++++++++++ src/frostfs_testlib/s3/boto3_client.py | 73 ++++++++++++++++++++ src/frostfs_testlib/s3/interfaces.py | 29 ++++++++ src/frostfs_testlib/utils/file_utils.py | 8 ++- 4 files changed, 195 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index ff4e329..ba95733 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -1440,3 +1440,90 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response + + # MFA METHODS + @reporter.step("Creates a new virtual MFA device") + def iam_create_virtual_mfa_device(self, virtual_mfa_device_name: str, outfile: str, bootstrap_method: str) -> tuple: + cmd = f"aws {self.common_flags} iam create-virtual-mfa-device --virtual-mfa-device-name {virtual_mfa_device_name}\ + --outfile {outfile} --bootstrap-method {bootstrap_method} --endpoint {self.iam_endpoint}" + + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber") + assert serial_number, f"Expected SerialNumber in response:\n{response}" + + return serial_number, False + + @reporter.step("Deactivates the specified MFA device and removes it from association with the user name") + def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: + cmd = f"aws {self.common_flags} iam deactivate-mfa-device --user-name {user_name} --serial-number {serial_number} --endpoint 
{self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Deletes a virtual MFA device") + def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: + cmd = f"aws {self.common_flags} iam delete-virtual-mfa-device --serial-number {serial_number} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Enables the specified MFA device and associates it with the specified IAM user") + def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: + cmd = f"aws {self.common_flags} iam enable-mfa-device --user-name {user_name} --serial-number {serial_number} --authentication-code1 {authentication_code1}\ + --authentication-code2 {authentication_code2} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Lists the MFA devices for an IAM user") + def iam_list_virtual_mfa_devices(self) -> dict: + cmd = f"aws {self.common_flags} iam list-virtual-mfa-devices --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}" + + return response + + @reporter.step("Get session token for user") + def sts_get_session_token( + self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None + ) -> tuple: + cmd = f"aws {self.common_flags} sts get-session-token --endpoint {self.iam_endpoint}" + if 
duration_seconds: + cmd += f" --duration-seconds {duration_seconds}" + if serial_number: + cmd += f" --serial-number {serial_number}" + if token_code: + cmd += f" --token-code {token_code}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + access_key = response.get("Credentials", {}).get("AccessKeyId") + secret_access_key = response.get("Credentials", {}).get("SecretAccessKey") + session_token = response.get("Credentials", {}).get("SessionToken") + assert access_key, f"Expected AccessKeyId in response:\n{response}" + assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" + assert session_token, f"Expected SessionToken in response:\n{response}" + + return access_key, secret_access_key, session_token diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 91d8c5a..12113ad 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -41,6 +41,8 @@ class Boto3ClientWrapper(S3ClientWrapper): self.boto3_iam_client: S3Client = None self.iam_endpoint: str = "" + self.boto3_sts_client: S3Client = None + self.access_key_id: str = access_key_id self.secret_access_key: str = secret_access_key self.profile = profile @@ -87,6 +89,14 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint_url=self.iam_endpoint, verify=False, ) + # since the STS does not have an enpoint, IAM is used + self.boto3_sts_client = self.session.client( + service_name="sts", + aws_access_key_id=self.access_key_id, + aws_secret_access_key=self.secret_access_key, + endpoint_url=iam_endpoint, + verify=False, + ) def _to_s3_param(self, param: str) -> str: replacement_map = { @@ -1265,3 +1275,66 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) + + # MFA methods + @reporter.step("Creates a new virtual MFA device") + def iam_create_virtual_mfa_device( + self, 
virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None + ) -> tuple: + response = self.boto3_iam_client.create_virtual_mfa_device(VirtualMFADeviceName=virtual_mfa_device_name) + + serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber") + base32StringSeed = response.get("VirtualMFADevice", {}).get("Base32StringSeed") + assert serial_number, f"Expected SerialNumber in response:\n{response}" + assert base32StringSeed, f"Expected Base32StringSeed in response:\n{response}" + + return serial_number, base32StringSeed + + @reporter.step("Deactivates the specified MFA device and removes it from association with the user name") + def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: + response = self.boto3_iam_client.deactivate_mfa_device(UserName=user_name, SerialNumber=serial_number) + + return response + + @reporter.step("Deletes a virtual MFA device") + def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: + response = self.boto3_iam_client.delete_virtual_mfa_device(SerialNumber=serial_number) + + return response + + @reporter.step("Enables the specified MFA device and associates it with the specified IAM user") + def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: + response = self.boto3_iam_client.enable_mfa_device( + UserName=user_name, + SerialNumber=serial_number, + AuthenticationCode1=authentication_code1, + AuthenticationCode2=authentication_code2, + ) + + return response + + @reporter.step("Lists the MFA devices for an IAM user") + def iam_list_virtual_mfa_devices(self) -> dict: + response = self.boto3_iam_client.list_virtual_mfa_devices() + assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}" + + return response + + @reporter.step("Get session token for user") + def sts_get_session_token( + self, duration_seconds: Optional[str] = "", 
serial_number: Optional[str] = "", token_code: Optional[str] = "" + ) -> tuple: + response = self.boto3_sts_client.get_session_token( + DurationSeconds=duration_seconds, + SerialNumber=serial_number, + TokenCode=token_code, + ) + + access_key = response.get("Credentials", {}).get("AccessKeyId") + secret_access_key = response.get("Credentials", {}).get("SecretAccessKey") + session_token = response.get("Credentials", {}).get("SessionToken") + assert access_key, f"Expected AccessKeyId in response:\n{response}" + assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" + assert session_token, f"Expected SessionToken in response:\n{response}" + + return access_key, secret_access_key, session_token diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index c084484..69a5154 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -578,3 +578,32 @@ class S3ClientWrapper(HumanReadableABC): @abstractmethod def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: """Removes the specified tags from the user""" + + # MFA methods + @abstractmethod + def iam_create_virtual_mfa_device( + self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None + ) -> tuple: + """Creates a new virtual MFA device""" + + @abstractmethod + def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: + """Deactivates the specified MFA device and removes it from association with the user name""" + + @abstractmethod + def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: + """Deletes a virtual MFA device""" + + @abstractmethod + def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: + """Enables the specified MFA device and associates it with the specified IAM user""" + + @abstractmethod + def iam_list_virtual_mfa_devices(self) -> dict: + 
"""Lists the MFA devices for an IAM user""" + + @abstractmethod + def sts_get_session_token( + self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None + ) -> tuple: + """Get session token for user""" diff --git a/src/frostfs_testlib/utils/file_utils.py b/src/frostfs_testlib/utils/file_utils.py index c2b497f..8839d7f 100644 --- a/src/frostfs_testlib/utils/file_utils.py +++ b/src/frostfs_testlib/utils/file_utils.py @@ -45,7 +45,7 @@ def ensure_directory_opener(path, flags): # TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps # Use object_size dt in future as argument @reporter.step("Generate file") -def generate_file(size: int) -> TestFile: +def generate_file(size: int, file_name: Optional[str] = None) -> TestFile: """Generates a binary file with the specified size in bytes. Args: @@ -54,7 +54,11 @@ def generate_file(size: int) -> TestFile: Returns: The path to the generated file. """ - test_file = TestFile(os.path.join(ASSETS_DIR, string_utils.unique_name("object-"))) + + if file_name is None: + file_name = string_utils.unique_name("object-") + + test_file = TestFile(os.path.join(ASSETS_DIR, file_name)) with open(test_file, "wb", opener=ensure_directory_opener) as file: file.write(os.urandom(size)) logger.info(f"File with size {size} bytes has been generated: {test_file}") From 8eaa511e5c39feaad06f7c3bf795639fcbbaac92 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 18 Nov 2024 16:57:14 +0300 Subject: [PATCH 32/81] [#322] Added classmethod decorator in Http client Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/http/http_client.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py index 261b2a6..3106273 100644 --- a/src/frostfs_testlib/http/http_client.py +++ b/src/frostfs_testlib/http/http_client.py @@ -50,7 +50,8 @@ class HttpClient: return response 
- def _attach_response(self, response: httpx.Response): + @classmethod + def _attach_response(cls, response: httpx.Response): request = response.request try: @@ -83,12 +84,13 @@ class HttpClient: f"Response Headers: {response_headers}\n\n" f"Response Body: {response.text}\n\n" ) - curl_request = self._create_curl_request(request.url, request.method, request.headers, request_body) + curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body) reporter.attach(report, "Requests Info") reporter.attach(curl_request, "CURL") - def _create_curl_request(self, url: str, method: str, headers: httpx.Headers, data: str) -> str: + @classmethod + def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str) -> str: headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items()) data = f" -d '{data}'" if data else "" # Option -k means no verify SSL From 0c9660fffc43b6cbeecf119a4e1cb3008020c042 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 20 Nov 2024 17:14:33 +0300 Subject: [PATCH 33/81] [#323] Update APE related entities Signed-off-by: a.berezin --- src/frostfs_testlib/resources/error_patterns.py | 8 ++++++-- src/frostfs_testlib/storage/dataclasses/ape.py | 14 +++++++++++++- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 3ba5f13..9b5e8e4 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -27,6 +27,10 @@ S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs" S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema." 
RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied" -RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied" +# Errors from node missing reasons if request was forwarded. Commenting for now +# RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied" +RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request" NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound" -NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound" +# Errors from node missing reasons if request was forwarded. Commenting for now +# NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound" +NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request" diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py index f0f1758..ef2e1f2 100644 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -26,6 +26,18 @@ class ObjectOperations(HumanReadableEnum): return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] +class ContainerOperations(HumanReadableEnum): + PUT = "container.put" + GET = "container.get" + LIST = "container.list" + DELETE = "container.delete" + WILDCARD_ALL = "container.*" + + @staticmethod + def get_all(): + return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] + + @dataclass class Operations: GET_CONTAINER = "GetContainer" @@ -124,7 +136,7 @@ class Rule: if not operations: self.operations = [] - elif isinstance(operations, ObjectOperations): + elif isinstance(operations, (ObjectOperations, ContainerOperations)): self.operations = 
[operations] else: self.operations = operations From 24e1dfef282b46e40c900711563e3f69b24220cb Mon Sep 17 00:00:00 2001 From: Roman Chernykh Date: Mon, 18 Nov 2024 13:01:26 +0300 Subject: [PATCH 34/81] [#324]Extension list_objects method --- src/frostfs_testlib/s3/aws_cli_client.py | 13 +++++++++++-- src/frostfs_testlib/s3/boto3_client.py | 11 +++++++++-- src/frostfs_testlib/s3/interfaces.py | 4 +++- 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index ba95733..2ac6d68 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -196,11 +196,20 @@ class AwsCliClient(S3ClientWrapper): return response.get("LocationConstraint") @reporter.step("List objects S3") - def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + def list_objects( + self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None + ) -> Union[dict, list[str]]: if bucket.startswith("-") or " " in bucket: bucket = f'"{bucket}"' - cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} " + if page_size: + cmd = cmd.replace("--no-paginate", "") + cmd += f" --page-size {page_size} " + if prefix: + cmd += f" --prefix {prefix}" + if self.profile: + cmd += f" --profile {self.profile} " output = self.local_shell.exec(cmd).stdout response = self._to_json(output) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 12113ad..e7f2c35 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -398,10 +398,17 @@ class Boto3ClientWrapper(S3ClientWrapper): return response if full_output else obj_list @reporter.step("List objects 
S3") - def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + def list_objects( + self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None + ) -> Union[dict, list[str]]: + params = {"Bucket": bucket} + if page_size: + params["MaxKeys"] = page_size + if prefix: + params["Prefix"] = prefix response = self._exec_request( self.boto3_client.list_objects, - params={"Bucket": bucket}, + params, endpoint=self.s3gate_endpoint, profile=self.profile, ) diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index 69a5154..c3d99eb 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -195,7 +195,9 @@ class S3ClientWrapper(HumanReadableABC): """ @abstractmethod - def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + def list_objects( + self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None + ) -> Union[dict, list[str]]: """Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. 
Make sure to design your application From 3dc7a5bdb095dbf02c9942f6844540efcccf1b88 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 28 Nov 2024 16:43:46 +0300 Subject: [PATCH 35/81] [#328] Change logic activating split-brain Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/network.py | 18 ++++---- .../controllers/cluster_state_controller.py | 41 +++++++++++++------ 2 files changed, 38 insertions(+), 21 deletions(-) diff --git a/src/frostfs_testlib/steps/network.py b/src/frostfs_testlib/steps/network.py index efaaf5a..6bde2f1 100644 --- a/src/frostfs_testlib/steps/network.py +++ b/src/frostfs_testlib/steps/network.py @@ -4,16 +4,18 @@ from frostfs_testlib.storage.cluster import ClusterNode class IpHelper: @staticmethod - def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None: + def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[tuple]) -> None: shell = node.host.get_shell() - for ip in block_ip: - shell.exec(f"ip route add blackhole {ip}") + for ip, table in block_ip: + if not table: + shell.exec(f"ip r a blackhole {ip}") + continue + shell.exec(f"ip r a blackhole {ip} table {table}") @staticmethod def restore_input_traffic_to_node(node: ClusterNode) -> None: shell = node.host.get_shell() - unlock_ip = shell.exec("ip route list | grep blackhole", CommandOptions(check=False)) - if unlock_ip.return_code != 0: - return - for ip in unlock_ip.stdout.strip().split("\n"): - shell.exec(f"ip route del blackhole {ip.split(' ')[1]}") + unlock_ip = shell.exec("ip r l table all | grep blackhole", CommandOptions(check=False)).stdout + + for active_blackhole in unlock_ip.strip().split("\n"): + shell.exec(f"ip r d {active_blackhole}") diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 5080d40..67e4d60 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ 
b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,4 +1,5 @@ import datetime +import itertools import logging import time from typing import TypeVar @@ -39,7 +40,7 @@ class ClusterStateController: def __init__(self, shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> None: self.stopped_nodes: list[ClusterNode] = [] self.detached_disks: dict[str, DiskController] = {} - self.dropped_traffic: list[ClusterNode] = [] + self.dropped_traffic: set[ClusterNode] = set() self.excluded_from_netmap: list[StorageNode] = [] self.stopped_services: set[NodeBase] = set() self.cluster = cluster @@ -325,22 +326,22 @@ class ClusterStateController: @reporter.step("Drop traffic to {node}, nodes - {block_nodes}") def drop_traffic(self, node: ClusterNode, wakeup_timeout: int, name_interface: str, block_nodes: list[ClusterNode] = None) -> None: - list_ip = self._parse_interfaces(block_nodes, name_interface) - IpHelper.drop_input_traffic_to_node(node, list_ip) + interfaces_tables = self._parse_interfaces(block_nodes, name_interface) + IpHelper.drop_input_traffic_to_node(node, interfaces_tables) time.sleep(wakeup_timeout) - self.dropped_traffic.append(node) + self.dropped_traffic.add(node) @reporter.step("Start traffic to {node}") def restore_traffic(self, node: ClusterNode) -> None: IpHelper.restore_input_traffic_to_node(node=node) - index = self.dropped_traffic.index(node) - self.dropped_traffic.pop(index) + self.dropped_traffic.discard(node) @reporter.step("Restore blocked nodes") def restore_all_traffic(self): if not self.dropped_traffic: return parallel(self._restore_traffic_to_node, self.dropped_traffic) + self.dropped_traffic.clear() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Hard reboot host {node} via magic SysRq option") @@ -516,17 +517,31 @@ class ClusterStateController: return disk_controller + @reporter.step("Restore traffic {node}") def _restore_traffic_to_node(self, node): IpHelper.restore_input_traffic_to_node(node) - 
def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str): - interfaces = [] + def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str) -> list[tuple]: + interfaces_and_tables = set() for node in nodes: - dict_interfaces = node.host.config.interfaces - for type, ip in dict_interfaces.items(): - if name_interface in type: - interfaces.append(ip) - return interfaces + shell = node.host.get_shell() + lines = shell.exec(f"ip r l table all | grep '{name_interface}'").stdout.splitlines() + + ips = [] + tables = [] + + for line in lines: + if "src" not in line or "table local" in line: + continue + parts = line.split() + ips.append(parts[-1]) + if "table" in line: + tables.append(parts[parts.index("table") + 1]) + tables.append(None) + + [interfaces_and_tables.add((ip, table)) for ip, table in itertools.product(ips, tables)] + + return interfaces_and_tables @reporter.step("Ping node") def _ping_host(self, node: ClusterNode): From 7d6768c83ff9d8169f1f73b01ae51b639db6c1cd Mon Sep 17 00:00:00 2001 From: anurindm Date: Thu, 28 Nov 2024 17:10:43 +0300 Subject: [PATCH 36/81] [#325] Added get nns records method to frostfs-adm Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 23 ++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index 5e39cf4..bdf4a91 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -463,3 +463,26 @@ class FrostfsAdmMorph(CliCommand): "morph ape rm-rule-chain", **{param: value for param, value in locals().items() if param not in ["self"]}, ) + + def get_nns_records( + self, + name: str, + type: Optional[str] = None, + rpc_endpoint: Optional[str] = None, + alphabet_wallets: Optional[str] = None, + ) -> CommandResult: + """Returns domain record of the specified type + + Args: + name: Domain name + type: Domain name service record 
type(A|CNAME|SOA|TXT) + rpc_endpoint: N3 RPC node endpoint + alphabet_wallets: path to alphabet wallets dir + + Returns: + Command's result + """ + return self._execute( + "morph nns get-records", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) From 0e040d2722526c3a7ea092f6167b5324a87170f0 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Mon, 2 Dec 2024 14:18:17 +0300 Subject: [PATCH 37/81] [#330] Improve CURL generation and fix Boto3 logging Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/http/http_client.py | 100 +++++++++++++++++------- src/frostfs_testlib/utils/cli_utils.py | 3 + 2 files changed, 76 insertions(+), 27 deletions(-) diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py index 3106273..0d1e0bd 100644 --- a/src/frostfs_testlib/http/http_client.py +++ b/src/frostfs_testlib/http/http_client.py @@ -1,6 +1,8 @@ +import io import json import logging import logging.config +from typing import IO import httpx @@ -40,7 +42,7 @@ class HttpClient: client = httpx.Client(timeout=timeout, transport=transport) response = client.request(method, url, **kwargs) - self._attach_response(response) + self._attach_response(response, **kwargs) logger.info(f"Response: {response.status_code} => {response.text}") if expected_status_code: @@ -51,47 +53,91 @@ class HttpClient: return response @classmethod - def _attach_response(cls, response: httpx.Response): - request = response.request - + def _parse_body(cls, readable: httpx.Request | httpx.Response) -> str | None: try: - request_headers = json.dumps(dict(request.headers), indent=4) - except json.JSONDecodeError: - request_headers = str(request.headers) - - try: - request_body = request.read() - try: - request_body = request_body.decode("utf-8") - except UnicodeDecodeError as e: - request_body = f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}" + content = readable.read() except Exception as e: - request_body = 
f"Error reading request body: {str(e)}" + logger.warning(f"Unable to read file: {str(e)}") + return None - request_body = "" if request_body is None else request_body + if not content: + return None + + request_body = None try: - response_headers = json.dumps(dict(response.headers), indent=4) - except json.JSONDecodeError: - response_headers = str(response.headers) + request_body = json.loads(content) + except (json.JSONDecodeError, UnicodeDecodeError) as e: + logger.warning(f"Unable to convert body to json: {str(e)}") + + if request_body is not None: + return json.dumps(request_body, default=str, indent=4) + + try: + request_body = content.decode() + except UnicodeDecodeError as e: + logger.warning(f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}") + + request_body = content if request_body is None else request_body + request_body = "" if len(request_body) > 1000 else request_body + + return request_body + + @classmethod + def _parse_files(cls, files: dict | None) -> str | None: + if not files: + return None + + filepaths = {} + + for name, file in files.items(): + if isinstance(file, io.IOBase): + filepaths[name] = file.name + + if isinstance(file, tuple): + filepaths[name] = file[1].name + + return json.dumps(filepaths, default=str, indent=4) + + @classmethod + def _attach_response(cls, response: httpx.Response, **kwargs): + request = response.request + request_headers = json.dumps(dict(request.headers), default=str, indent=4) + request_body = cls._parse_body(request) + + files = kwargs.get("files") + request_files = cls._parse_files(files) + + response_headers = json.dumps(dict(response.headers), default=str, indent=4) + response_body = cls._parse_body(response) report = ( f"Method: {request.method}\n\n" - f"URL: {request.url}\n\n" - f"Request Headers: {request_headers}\n\n" - f"Request Body: {request_body}\n\n" - f"Response Status Code: {response.status_code}\n\n" - f"Response Headers: {response_headers}\n\n" - f"Response Body: 
{response.text}\n\n" + + f"URL: {request.url}\n\n" + + f"Request Headers: {request_headers}\n\n" + + (f"Request Body: {request_body}\n\n" if request_body else "") + + (f"Request Files: {request_files}\n\n" if request_files else "") + + f"Response Status Code: {response.status_code}\n\n" + + f"Response Headers: {response_headers}\n\n" + + (f"Response Body: {response_body}\n\n" if response_body else "") ) - curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body) + curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body, files) reporter.attach(report, "Requests Info") reporter.attach(curl_request, "CURL") @classmethod - def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str) -> str: + def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict = None) -> str: headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items()) data = f" -d '{data}'" if data else "" + + if files: + for name, file in files.items(): + if isinstance(file, io.IOBase): + data += f' -F "{name}=@{file.name}"' + + if isinstance(file, tuple): + data += f' -F "{name}=@{file[1].name}"' + # Option -k means no verify SSL return f"curl {url} -X {method} {headers}{data} -k" diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 32e4346..0f9fef2 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -80,6 +80,9 @@ def log_command_execution(cmd: str, output: Union[str, dict], params: Optional[d if not params: params = {} + if params.get("Body") and len(params.get("Body")) > 1000: + params["Body"] = "" + output_params = params try: From 8ec7e21e8450167d02875b3255ab9140f60facb2 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Tue, 3 Dec 2024 14:55:12 +0300 Subject: [PATCH 38/81] [#331] Fix type hints for service methods Signed-off-by: 
a.berezin --- src/frostfs_testlib/storage/cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 3ec4922..b67e34d 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -91,10 +91,10 @@ class ClusterNode: config_str = yaml.dump(new_config) shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}") - def config(self, service_type: ServiceClass) -> ServiceConfigurationYml: + def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml: return self.service(service_type).config - def service(self, service_type: ServiceClass) -> ServiceClass: + def service(self, service_type: type[ServiceClass]) -> ServiceClass: """ Get a service cluster node of specified type. From b3d05c5c28ab6727e3e56bdba7de05e8ed9fb6b1 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 15 Nov 2024 21:01:34 +0300 Subject: [PATCH 39/81] [#326] Automation of PATCH method in GRPC Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/cli/frostfs_cli/object.py | 47 +++++++++++ src/frostfs_testlib/storage/constants.py | 2 + .../storage/dataclasses/ape.py | 1 + .../grpc_operations/implementations/object.py | 83 +++++++++++++++++++ .../storage/grpc_operations/interfaces.py | 32 +++++++ 5 files changed, 165 insertions(+) diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 1857987..0c00563 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -276,6 +276,53 @@ class FrostfsCliObject(CliCommand): **{param: value for param, value in locals().items() if param not in ["self"]}, ) + def patch( + self, + rpc_endpoint: str, + cid: str, + oid: str, + range: list[str] = None, + payload: list[str] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + address: Optional[str] = None, + 
bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + trace: bool = False, + ttl: Optional[int] = None, + wallet: Optional[str] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + PATCH an object. + + Args: + rpc_endpoint: Remote node address (as 'multiaddr' or ':') + cid: Container ID + oid: Object ID + range: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2] + payload: An array of file paths to be applied in each range + new_attrs: Attributes to be changed in the format Key1=Value1,Key2=Value2 + replace_attrs: Replace all attributes completely with new ones specified in new_attrs + address: Address of wallet account + bearer: File with signed JSON or binary encoded bearer token + generate_key: Generate new private key + session: Filepath to a JSON- or binary-encoded token of the object RANGE session + timeout: Timeout for the operation + trace: Generate trace ID and print it + ttl: TTL value in request meta header (default 2) + wallet: WIF (NEP-2) string or path to the wallet or binary key + xhdr: Dict with request X-Headers + Returns: + (str): ID of patched Object + """ + return self._execute( + "object patch", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + def range( self, rpc_endpoint: str, diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 2cffd3a..39c6b66 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -23,4 +23,6 @@ class PlacementRule: DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" + REP_1_FOR_2_NODES_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 2 FROM * AS X" DEFAULT_EC_PLACEMENT_RULE = "EC 3.1" + 
EC_1_1_FOR_2_NODES_PLACEMENT_RULE = "EC 1.1 IN X CBF 1 SELECT 2 FROM * AS X" diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py index ef2e1f2..b7b5dfc 100644 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -13,6 +13,7 @@ FROSTFS_CONTRACT_CACHE_TIMEOUT = 30 class ObjectOperations(HumanReadableEnum): PUT = "object.put" + PATCH = "object.patch" GET = "object.get" HEAD = "object.head" GET_RANGE = "object.range" diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py index 0e14aec..f31f223 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py @@ -206,6 +206,11 @@ class ObjectOperations(interfaces.ObjectInterface): hash_type=hash_type, timeout=timeout, ) + + if range: + # Cut off the range and return only hash + return result.stdout.split(":")[1].strip() + return result.stdout @reporter.step("Head object") @@ -407,6 +412,57 @@ class ObjectOperations(interfaces.ObjectInterface): oid = id_str.split(":")[1] return oid.strip() + @reporter.step("Patch object") + def patch( + self, + cid: str, + oid: str, + endpoint: str, + ranges: list[str] = None, + payloads: list[str] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + trace: bool = False, + ) -> str: + """ + PATCH an object. 
+ + Args: + cid: ID of Container where we get the Object from + oid: Object ID + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + ranges: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2] + payloads: An array of file paths to be applied in each range + new_attrs: Attributes to be changed in the format "key1=value1,key2=value2" + replace_attrs: Replace all attributes completely with new ones specified in new_attrs + bearer: Path to Bearer Token file, appends to `--bearer` key + xhdr: Request X-Headers in form of Key=Value + session: Path to a JSON-encoded container session token + timeout: Timeout for the operation + trace: Generate trace ID and print it + Returns: + (str): ID of patched Object + """ + result = self.cli.object.patch( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + range=ranges, + payload=payloads, + new_attrs=new_attrs, + replace_attrs=replace_attrs, + bearer=bearer, + xhdr=xhdr, + session=session, + timeout=timeout, + trace=trace, + ) + return result.stdout.split(":")[1].strip() + @reporter.step("Put object to random node") def put_to_random_node( self, @@ -622,3 +678,30 @@ class ObjectOperations(interfaces.ObjectInterface): ] return object_nodes + + @reporter.step("Search parts of object") + def parts( + self, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> list[str]: + endpoint = alive_node.storage_node.get_rpc_endpoint() + response = self.cli.object.nodes( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + ttl=1 if is_direct else None, + json=True, + xhdr=xhdr, + timeout=timeout, + verify_presence_all=verify_presence_all, + ) + response_json = json.loads(response.stdout) + return [data_object["object_id"] for data_object in response_json["data_objects"]] diff --git 
a/src/frostfs_testlib/storage/grpc_operations/interfaces.py b/src/frostfs_testlib/storage/grpc_operations/interfaces.py index c293c2d..07fe52f 100644 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces.py +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces.py @@ -198,6 +198,24 @@ class ObjectInterface(ABC): ) -> str: pass + @abstractmethod + def patch( + self, + cid: str, + oid: str, + endpoint: str, + ranges: Optional[list[str]] = None, + payloads: Optional[list[str]] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + bearer: Optional[str] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + trace: bool = False, + ) -> str: + pass + @abstractmethod def put_to_random_node( self, @@ -264,6 +282,20 @@ class ObjectInterface(ABC): ) -> List[ClusterNode]: pass + @abstractmethod + def parts( + self, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = None, + ) -> List[str]: + pass + class ContainerInterface(ABC): @abstractmethod From 61353cb38c723a3d3513de96a4ae7f142ed3c637 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Thu, 5 Dec 2024 14:17:25 +0300 Subject: [PATCH 40/81] [#332] Fix `files` param in http client Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/http/http_client.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py index 0d1e0bd..6008989 100644 --- a/src/frostfs_testlib/http/http_client.py +++ b/src/frostfs_testlib/http/http_client.py @@ -2,7 +2,7 @@ import io import json import logging import logging.config -from typing import IO +from typing import Mapping, Sequence import httpx @@ -84,13 +84,20 @@ class HttpClient: return request_body @classmethod - def _parse_files(cls, files: dict | None) 
-> str | None: + def _parse_files(cls, files: Mapping | Sequence | None) -> str | None: if not files: return None filepaths = {} - for name, file in files.items(): + if isinstance(files, Sequence): + items = files + elif isinstance(files, Mapping): + items = files.items() + else: + raise TypeError(f"'files' must be either Sequence or Mapping, got: {type(files).__name__}") + + for name, file in items: if isinstance(file, io.IOBase): filepaths[name] = file.name From ee7d9df4a9eddf7da21b66a2070227c0aaa71ad2 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Thu, 5 Dec 2024 16:34:36 +0300 Subject: [PATCH 41/81] [#333] Fix `files` param in http client part two Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/http/http_client.py | 26 ++++++++++--------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py index 6008989..a3e3e54 100644 --- a/src/frostfs_testlib/http/http_client.py +++ b/src/frostfs_testlib/http/http_client.py @@ -84,12 +84,12 @@ class HttpClient: return request_body @classmethod - def _parse_files(cls, files: Mapping | Sequence | None) -> str | None: - if not files: - return None - + def _parse_files(cls, files: Mapping | Sequence | None) -> dict: filepaths = {} + if not files: + return filepaths + if isinstance(files, Sequence): items = files elif isinstance(files, Mapping): @@ -100,11 +100,10 @@ class HttpClient: for name, file in items: if isinstance(file, io.IOBase): filepaths[name] = file.name - - if isinstance(file, tuple): + elif isinstance(file, Sequence): filepaths[name] = file[1].name - return json.dumps(filepaths, default=str, indent=4) + return filepaths @classmethod def _attach_response(cls, response: httpx.Response, **kwargs): @@ -128,23 +127,18 @@ class HttpClient: + f"Response Headers: {response_headers}\n\n" + (f"Response Body: {response_body}\n\n" if response_body else "") ) - curl_request = 
cls._create_curl_request(request.url, request.method, request.headers, request_body, files) + curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body, request_files) reporter.attach(report, "Requests Info") reporter.attach(curl_request, "CURL") @classmethod - def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict = None) -> str: + def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str: headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items()) data = f" -d '{data}'" if data else "" - if files: - for name, file in files.items(): - if isinstance(file, io.IOBase): - data += f' -F "{name}=@{file.name}"' - - if isinstance(file, tuple): - data += f' -F "{name}=@{file[1].name}"' + for name, path in files.items(): + data += f' -F "{name}=@{path}"' # Option -k means no verify SSL return f"curl {url} -X {method} {headers}{data} -k" From 0ebb8453290b26f85bce7091dd6ea307df5f0d9a Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Fri, 6 Dec 2024 10:50:34 +0300 Subject: [PATCH 42/81] [#335] Fixed iam boto3 client --- src/frostfs_testlib/s3/boto3_client.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index e7f2c35..c680f17 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -86,6 +86,7 @@ class Boto3ClientWrapper(S3ClientWrapper): service_name="iam", aws_access_key_id=self.access_key_id, aws_secret_access_key=self.secret_access_key, + region_name=self.region, endpoint_url=self.iam_endpoint, verify=False, ) From 8ff1e72499f49054b7cf0d8fd05f87b040e5d32f Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Fri, 13 Dec 2024 10:45:14 +0300 Subject: [PATCH 43/81] [#337] Add rule chain error Signed-off-by: Ekaterina Chernitsyna --- src/frostfs_testlib/resources/error_patterns.py | 1 + 
1 file changed, 1 insertion(+) diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 9b5e8e4..4c22648 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -9,6 +9,7 @@ OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed" SESSION_NOT_FOUND = "code = 4096.*message = session token not found" OUT_OF_RANGE = "code = 2053.*message = out of range" EXPIRED_SESSION_TOKEN = "code = 4097.*message = expired session token" +ADD_CHAIN_ERROR = "code = 5120 message = apemanager access denied" # TODO: Change to codes with message # OBJECT_IS_LOCKED = "code = 2050.*message = object is locked" # LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." will be available once 2092 is fixed From cd15be3b7c41448280217aac741f2fc1efefac95 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 15 Nov 2024 21:03:21 +0300 Subject: [PATCH 44/81] [#334] Automation of PATCH method in S3 Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/http/http_client.py | 6 +- src/frostfs_testlib/s3/aws_cli_client.py | 7 +- src/frostfs_testlib/s3/boto3_client.py | 7 +- src/frostfs_testlib/s3/interfaces.py | 4 +- src/frostfs_testlib/s3/s3_http_client.py | 127 ++++++++++++++++++++++ src/frostfs_testlib/steps/s3/s3_helper.py | 24 ++++ 6 files changed, 162 insertions(+), 13 deletions(-) create mode 100644 src/frostfs_testlib/s3/s3_http_client.py diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py index a3e3e54..c3e5fae 100644 --- a/src/frostfs_testlib/http/http_client.py +++ b/src/frostfs_testlib/http/http_client.py @@ -46,9 +46,9 @@ class HttpClient: logger.info(f"Response: {response.status_code} => {response.text}") if expected_status_code: - assert response.status_code == expected_status_code, ( - f"Got {response.status_code} response code" f" while {expected_status_code} expected" - ) + assert ( + 
response.status_code == expected_status_code + ), f"Got {response.status_code} response code while {expected_status_code} expected" return response diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 2ac6d68..4196c77 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -171,7 +171,7 @@ class AwsCliClient(S3ClientWrapper): return response.get("TagSet") @reporter.step("Get bucket acl") - def get_bucket_acl(self, bucket: str) -> list: + def get_bucket_acl(self, bucket: str) -> dict: if bucket.startswith("-") or " " in bucket: bucket = f'"{bucket}"' @@ -179,8 +179,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Grants") + return self._to_json(output) @reporter.step("Get bucket location") def get_bucket_location(self, bucket: str) -> dict: @@ -861,7 +860,7 @@ class AwsCliClient(S3ClientWrapper): return response["Parts"] @reporter.step("Complete multipart upload S3") - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: if bucket.startswith("-") or " " in bucket: bucket = f'"{bucket}"' diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index c680f17..6b6c74e 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -230,14 +230,13 @@ class Boto3ClientWrapper(S3ClientWrapper): return response.get("TagSet") @reporter.step("Get bucket acl") - def get_bucket_acl(self, bucket: str) -> list: - response = self._exec_request( + def get_bucket_acl(self, bucket: str) -> dict: + return self._exec_request( 
self.boto3_client.get_bucket_acl, params={"Bucket": bucket}, endpoint=self.s3gate_endpoint, profile=self.profile, ) - return response.get("Grants") @reporter.step("Delete bucket tagging") def delete_bucket_tagging(self, bucket: str) -> None: @@ -705,7 +704,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response["Parts"] @reporter.step("Complete multipart upload S3") - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] params = self._convert_to_s3_params(locals(), exclude=["parts"]) params["MultipartUpload"] = {"Parts": parts} diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index c3d99eb..7ce9f31 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -128,7 +128,7 @@ class S3ClientWrapper(HumanReadableABC): """Deletes the tags from the bucket.""" @abstractmethod - def get_bucket_acl(self, bucket: str) -> list: + def get_bucket_acl(self, bucket: str) -> dict: """This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket.""" @abstractmethod @@ -336,7 +336,7 @@ class S3ClientWrapper(HumanReadableABC): """Lists the parts that have been uploaded for a specific multipart upload.""" @abstractmethod - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: """Completes a multipart upload by assembling previously uploaded parts.""" @abstractmethod diff --git a/src/frostfs_testlib/s3/s3_http_client.py b/src/frostfs_testlib/s3/s3_http_client.py new file mode 100644 index 0000000..a34c380 --- /dev/null +++ b/src/frostfs_testlib/s3/s3_http_client.py @@ -0,0 +1,127 @@ +import 
hashlib +import logging +import xml.etree.ElementTree as ET + +import httpx +from botocore.auth import SigV4Auth +from botocore.awsrequest import AWSRequest +from botocore.credentials import Credentials + +from frostfs_testlib import reporter +from frostfs_testlib.http.http_client import HttpClient +from frostfs_testlib.utils.file_utils import TestFile + +logger = logging.getLogger("NeoLogger") + +DEFAULT_TIMEOUT = 60.0 + + +class S3HttpClient: + def __init__( + self, s3gate_endpoint: str, access_key_id: str, secret_access_key: str, profile: str = "default", region: str = "us-east-1" + ) -> None: + self.http_client = HttpClient() + self.s3gate_endpoint = s3gate_endpoint + self.credentials = Credentials(access_key_id, secret_access_key) + self.profile = profile + self.region = region + self.service = "s3" + self.signature = SigV4Auth(self.credentials, self.service, self.region) + + def _to_s3_header(self, header: str) -> dict: + replacement_map = { + "Acl": "ACL", + "_": "-", + } + + result = header + if not header.startswith("x_amz"): + result = header.title() + + for find, replace in replacement_map.items(): + result = result.replace(find, replace) + + return result + + def _convert_to_s3_headers(self, scope: dict, exclude: list[str] = None): + exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"] + return {self._to_s3_header(header): value for header, value in scope.items() if header not in exclude and value is not None} + + def _create_aws_request( + self, method: str, url: str, headers: dict, content: str | bytes | TestFile = None, params: dict = None + ) -> AWSRequest: + data = b"" + + if content is not None: + if isinstance(content, TestFile): + with open(content, "rb") as io_content: + data = io_content.read() + elif isinstance(content, str): + data = bytes(content, encoding="utf-8") + elif isinstance(content, bytes): + data = content + else: + raise TypeError(f"Content expected as a string, bytes or TestFile object, got: {content}") + + 
headers["X-Amz-Content-SHA256"] = hashlib.sha256(data).hexdigest() + aws_request = AWSRequest(method, url, headers, data, params) + self.signature.add_auth(aws_request) + + return aws_request + + def _exec_request( + self, + method: str, + url: str, + headers: dict, + content: str | bytes | TestFile = None, + params: dict = None, + timeout: float = DEFAULT_TIMEOUT, + ) -> dict: + aws_request = self._create_aws_request(method, url, headers, content, params) + response = self.http_client.send( + aws_request.method, + aws_request.url, + headers=dict(aws_request.headers), + data=aws_request.data, + params=aws_request.params, + timeout=timeout, + ) + + try: + response.raise_for_status() + except httpx.HTTPStatusError: + raise httpx.HTTPStatusError(response.text, request=response.request, response=response) + + root = ET.fromstring(response.read()) + data = { + "LastModified": root.find(".//LastModified").text, + "ETag": root.find(".//ETag").text, + } + + if response.headers.get("x-amz-version-id"): + data["VersionId"] = response.headers.get("x-amz-version-id") + + return data + + @reporter.step("Patch object S3") + def patch_object( + self, + bucket: str, + key: str, + content: str | bytes | TestFile, + content_range: str, + version_id: str = None, + if_match: str = None, + if_unmodified_since: str = None, + x_amz_expected_bucket_owner: str = None, + timeout: float = DEFAULT_TIMEOUT, + ) -> dict: + if content_range and not content_range.startswith("bytes"): + content_range = f"bytes {content_range}/*" + + url = f"{self.s3gate_endpoint}/{bucket}/{key}" + headers = self._convert_to_s3_headers(locals(), exclude=["bucket", "key", "content", "version_id", "timeout"]) + params = {"VersionId": version_id} if version_id is not None else None + + return self._exec_request("PATCH", url, headers, content, params, timeout=timeout) diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index dbf48d3..7949f2d 100644 --- 
a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -12,6 +12,7 @@ from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.container import search_nodes_with_container from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.utils.file_utils import TestFile, get_file_hash logger = logging.getLogger("NeoLogger") @@ -185,3 +186,26 @@ def search_nodes_with_bucket( break nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) return nodes_list + + +def get_bytes_relative_to_object(value: int | str, object_size: int = None, part_size: int = None) -> int: + if isinstance(value, int): + return value + + if "part" not in value and "object" not in value: + return int(value) + + if object_size is not None: + value = value.replace("object", str(object_size)) + + if part_size is not None: + value = value.replace("part", str(part_size)) + + return int(eval(value)) + + +def get_range_relative_to_object(rng: str, object_size: int = None, part_size: int = None, int_values: bool = False) -> str | int: + start, end = rng.split(":") + start = get_bytes_relative_to_object(start, object_size, part_size) + end = get_bytes_relative_to_object(end, object_size, part_size) + return (start, end) if int_values else f"bytes {start}-{end}/*" From cc7bd4ffc9dd59115144bdd4cf81ff07ffe8b372 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 17 Dec 2024 13:55:15 +0300 Subject: [PATCH 45/81] [#339] Added ns args for func container create Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/cli/container.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index 809b39a..db896ce 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ 
-111,6 +111,8 @@ def create_container( options: Optional[dict] = None, await_mode: bool = True, wait_for_creation: bool = True, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> str: """ @@ -143,6 +145,8 @@ def create_container( result = cli.container.create( rpc_endpoint=endpoint, policy=rule, + nns_name=nns_name, + nns_zone=nns_zone, basic_acl=basic_acl, attributes=attributes, name=name, From 335eed85b152e2e8ac147bc95cc2af88beaad7ff Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Mon, 16 Dec 2024 22:06:00 +0300 Subject: [PATCH 46/81] [#338] Added parameter word_count to method get_logs Signed-off-by: Ilyas Niyazov --- src/frostfs_testlib/hosting/docker_host.py | 1 + src/frostfs_testlib/hosting/interfaces.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 01dc6b5..d458b0a 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -250,6 +250,7 @@ class DockerHost(Host): unit: Optional[str] = None, exclude_filter: Optional[str] = None, priority: Optional[str] = None, + word_count: bool = None, ) -> str: client = self._get_docker_client() filtered_logs = "" diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 6d1e5da..f58d856 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -324,6 +324,7 @@ class Host(ABC): unit: Optional[str] = None, exclude_filter: Optional[str] = None, priority: Optional[str] = None, + word_count: bool = None, ) -> str: """Get logs from host filtered by regex. @@ -334,6 +335,7 @@ class Host(ABC): unit: required unit. priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher. For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0. 
+ word_count: output type, expected values: lines, bytes, json Returns: Found entries as str if any found. From dc5a9e7bb9336a9b331c119a09615e68f4703d01 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Tue, 17 Dec 2024 18:16:54 +0300 Subject: [PATCH 47/81] [#340] Move s3 and http directories to avoid conflict with requests Signed-off-by: Kirill Sosnovskikh --- pyproject.toml | 2 +- src/frostfs_testlib/clients/__init__.py | 5 +++++ src/frostfs_testlib/{ => clients}/http/__init__.py | 0 src/frostfs_testlib/{ => clients}/http/http_client.py | 0 src/frostfs_testlib/clients/s3/__init__.py | 1 + src/frostfs_testlib/{ => clients}/s3/aws_cli_client.py | 2 +- src/frostfs_testlib/{ => clients}/s3/boto3_client.py | 2 +- src/frostfs_testlib/{ => clients}/s3/curl_bucket_resolver.py | 2 +- src/frostfs_testlib/{ => clients}/s3/interfaces.py | 0 src/frostfs_testlib/{ => clients}/s3/s3_http_client.py | 2 +- src/frostfs_testlib/s3/__init__.py | 3 --- src/frostfs_testlib/steps/cli/container.py | 2 -- src/frostfs_testlib/steps/http/__init__.py | 0 src/frostfs_testlib/steps/{http => }/http_gate.py | 2 +- src/frostfs_testlib/steps/{s3 => }/s3_helper.py | 4 +--- .../storage/grpc_operations/implementations/container.py | 2 +- tests/test_dataclasses.py | 2 +- 17 files changed, 15 insertions(+), 16 deletions(-) create mode 100644 src/frostfs_testlib/clients/__init__.py rename src/frostfs_testlib/{ => clients}/http/__init__.py (100%) rename src/frostfs_testlib/{ => clients}/http/http_client.py (100%) create mode 100644 src/frostfs_testlib/clients/s3/__init__.py rename src/frostfs_testlib/{ => clients}/s3/aws_cli_client.py (99%) rename src/frostfs_testlib/{ => clients}/s3/boto3_client.py (99%) rename src/frostfs_testlib/{ => clients}/s3/curl_bucket_resolver.py (88%) rename src/frostfs_testlib/{ => clients}/s3/interfaces.py (100%) rename src/frostfs_testlib/{ => clients}/s3/s3_http_client.py (98%) delete mode 100644 src/frostfs_testlib/s3/__init__.py delete mode 100644 
src/frostfs_testlib/steps/http/__init__.py rename src/frostfs_testlib/steps/{http => }/http_gate.py (99%) rename src/frostfs_testlib/steps/{s3 => }/s3_helper.py (97%) diff --git a/pyproject.toml b/pyproject.toml index 3faa637..2778f8a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -62,7 +62,7 @@ authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3Credentia wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider" [project.entry-points."frostfs.testlib.bucket_cid_resolver"] -frostfs = "frostfs_testlib.s3.curl_bucket_resolver:CurlBucketContainerResolver" +frostfs = "frostfs_testlib.clients.s3.curl_bucket_resolver:CurlBucketContainerResolver" [tool.isort] profile = "black" diff --git a/src/frostfs_testlib/clients/__init__.py b/src/frostfs_testlib/clients/__init__.py new file mode 100644 index 0000000..e46766b --- /dev/null +++ b/src/frostfs_testlib/clients/__init__.py @@ -0,0 +1,5 @@ +from frostfs_testlib.clients.http.http_client import HttpClient +from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient +from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper +from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper +from frostfs_testlib.clients.s3.s3_http_client import S3HttpClient diff --git a/src/frostfs_testlib/http/__init__.py b/src/frostfs_testlib/clients/http/__init__.py similarity index 100% rename from src/frostfs_testlib/http/__init__.py rename to src/frostfs_testlib/clients/http/__init__.py diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/clients/http/http_client.py similarity index 100% rename from src/frostfs_testlib/http/http_client.py rename to src/frostfs_testlib/clients/http/http_client.py diff --git a/src/frostfs_testlib/clients/s3/__init__.py b/src/frostfs_testlib/clients/s3/__init__.py new file mode 100644 index 0000000..65a3990 --- /dev/null +++ b/src/frostfs_testlib/clients/s3/__init__.py @@ -0,0 +1 @@ +from 
frostfs_testlib.clients.s3.interfaces import BucketContainerResolver, S3ClientWrapper, VersioningStatus diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py similarity index 99% rename from src/frostfs_testlib/s3/aws_cli_client.py rename to src/frostfs_testlib/clients/s3/aws_cli_client.py index 4196c77..3496b2b 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/clients/s3/aws_cli_client.py @@ -6,8 +6,8 @@ from time import sleep from typing import Literal, Optional, Union from frostfs_testlib import reporter +from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME -from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.shell import CommandOptions from frostfs_testlib.shell.local_shell import LocalShell from frostfs_testlib.utils import string_utils diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py similarity index 99% rename from src/frostfs_testlib/s3/boto3_client.py rename to src/frostfs_testlib/clients/s3/boto3_client.py index 6b6c74e..53e7ffa 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -13,8 +13,8 @@ from botocore.exceptions import ClientError from mypy_boto3_s3 import S3Client from frostfs_testlib import reporter +from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME -from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.utils import string_utils # TODO: Refactor this code to use shell instead of _cmd_run diff --git 
a/src/frostfs_testlib/s3/curl_bucket_resolver.py b/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py similarity index 88% rename from src/frostfs_testlib/s3/curl_bucket_resolver.py rename to src/frostfs_testlib/clients/s3/curl_bucket_resolver.py index b713e79..4d845cf 100644 --- a/src/frostfs_testlib/s3/curl_bucket_resolver.py +++ b/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py @@ -1,7 +1,7 @@ import re from frostfs_testlib.cli.generic_cli import GenericCli -from frostfs_testlib.s3.interfaces import BucketContainerResolver +from frostfs_testlib.clients.s3 import BucketContainerResolver from frostfs_testlib.storage.cluster import ClusterNode diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/clients/s3/interfaces.py similarity index 100% rename from src/frostfs_testlib/s3/interfaces.py rename to src/frostfs_testlib/clients/s3/interfaces.py diff --git a/src/frostfs_testlib/s3/s3_http_client.py b/src/frostfs_testlib/clients/s3/s3_http_client.py similarity index 98% rename from src/frostfs_testlib/s3/s3_http_client.py rename to src/frostfs_testlib/clients/s3/s3_http_client.py index a34c380..b83e7a8 100644 --- a/src/frostfs_testlib/s3/s3_http_client.py +++ b/src/frostfs_testlib/clients/s3/s3_http_client.py @@ -8,7 +8,7 @@ from botocore.awsrequest import AWSRequest from botocore.credentials import Credentials from frostfs_testlib import reporter -from frostfs_testlib.http.http_client import HttpClient +from frostfs_testlib.clients import HttpClient from frostfs_testlib.utils.file_utils import TestFile logger = logging.getLogger("NeoLogger") diff --git a/src/frostfs_testlib/s3/__init__.py b/src/frostfs_testlib/s3/__init__.py deleted file mode 100644 index 32426c2..0000000 --- a/src/frostfs_testlib/s3/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from frostfs_testlib.s3.aws_cli_client import AwsCliClient -from frostfs_testlib.s3.boto3_client import Boto3ClientWrapper -from frostfs_testlib.s3.interfaces import S3ClientWrapper, 
VersioningStatus diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index db896ce..092b1a3 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -7,9 +7,7 @@ from typing import Optional, Union from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.plugins import load_plugin from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC -from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node from frostfs_testlib.storage.cluster import Cluster, ClusterNode diff --git a/src/frostfs_testlib/steps/http/__init__.py b/src/frostfs_testlib/steps/http/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http_gate.py similarity index 99% rename from src/frostfs_testlib/steps/http/http_gate.py rename to src/frostfs_testlib/steps/http_gate.py index 117cded..4e712c1 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http_gate.py @@ -12,8 +12,8 @@ import requests from frostfs_testlib import reporter from frostfs_testlib.cli import GenericCli +from frostfs_testlib.clients.s3.aws_cli_client import command_options from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE -from frostfs_testlib.s3.aws_cli_client import command_options from frostfs_testlib.shell import Shell from frostfs_testlib.shell.local_shell import LocalShell from frostfs_testlib.steps.cli.object import get_object diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3_helper.py similarity index 97% rename from src/frostfs_testlib/steps/s3/s3_helper.py rename to src/frostfs_testlib/steps/s3_helper.py index 7949f2d..c3092df 100644 --- 
a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3_helper.py @@ -6,13 +6,11 @@ from typing import Optional from dateutil.parser import parse from frostfs_testlib import reporter -from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus -from frostfs_testlib.s3.interfaces import BucketContainerResolver +from frostfs_testlib.clients.s3 import BucketContainerResolver, S3ClientWrapper, VersioningStatus from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.container import search_nodes_with_container from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.utils.file_utils import TestFile, get_file_hash logger = logging.getLogger("NeoLogger") diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py index 7a637d7..86cac26 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py @@ -5,9 +5,9 @@ from typing import List, Optional, Union from frostfs_testlib import reporter from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.clients.s3 import BucketContainerResolver from frostfs_testlib.plugins import load_plugin from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.grpc_operations import interfaces from frostfs_testlib.utils import json_utils diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py index 19f3832..677aed4 100644 --- a/tests/test_dataclasses.py +++ b/tests/test_dataclasses.py @@ -2,7 +2,7 @@ from typing import Any import pytest -from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper +from 
frostfs_testlib.clients import AwsCliClient, Boto3ClientWrapper from frostfs_testlib.storage.dataclasses.acl import EACLRole from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.object_size import ObjectSize From 0479701258ba115fce9ee3e91783b112b473a4ca Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Wed, 18 Dec 2024 17:35:14 +0300 Subject: [PATCH 48/81] [#341] Add test for multipart object in Test_http_object testsuite Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/steps/http_gate.py | 48 ++++++++++---------------- 1 file changed, 19 insertions(+), 29 deletions(-) diff --git a/src/frostfs_testlib/steps/http_gate.py b/src/frostfs_testlib/steps/http_gate.py index 4e712c1..51b0301 100644 --- a/src/frostfs_testlib/steps/http_gate.py +++ b/src/frostfs_testlib/steps/http_gate.py @@ -38,34 +38,34 @@ def get_via_http_gate( """ This function gets given object from HTTP gate cid: container id to get object from - oid: object ID + oid: object id / object key node: node to make request request_path: (optional) http request, if ommited - use default [{endpoint}/get/{cid}/{oid}] """ - # if `request_path` parameter omitted, use default - if request_path is None: - request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" - else: + request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" + if request_path: request = f"{node.http_gate.get_endpoint()}{request_path}" - resp = requests.get(request, stream=True, timeout=timeout, verify=False) + response = requests.get(request, stream=True, timeout=timeout, verify=False) - if not resp.ok: + if not response.ok: raise Exception( f"""Failed to get object via HTTP gate: - request: {resp.request.path_url}, - response: {resp.text}, - headers: {resp.headers}, - status code: {resp.status_code} {resp.reason}""" + request: {response.request.path_url}, + response: {response.text}, + headers: {response.headers}, + 
status code: {response.status_code} {response.reason}""" ) logger.info(f"Request: {request}") - _attach_allure_step(request, resp.status_code) + _attach_allure_step(request, response.status_code) test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")) with open(test_file, "wb") as file: - shutil.copyfileobj(resp.raw, file) + for chunk in response.iter_content(chunk_size=8192): + file.write(chunk) + return test_file @@ -117,12 +117,12 @@ def get_via_http_gate_by_attribute( endpoint: http gate endpoint request_path: (optional) http request path, if ommited - use default [{endpoint}/get_by_attribute/{Key}/{Value}] """ + attr_name = list(attribute.keys())[0] attr_value = quote_plus(str(attribute.get(attr_name))) - # if `request_path` parameter ommited, use default - if request_path is None: - request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" - else: + + request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" + if request_path: request = f"{node.http_gate.get_endpoint()}{request_path}" resp = requests.get(request, stream=True, timeout=timeout, verify=False) @@ -357,19 +357,9 @@ def try_to_get_object_via_passed_request_and_expect_error( ) -> None: try: if attrs is None: - get_via_http_gate( - cid=cid, - oid=oid, - node=node, - request_path=http_request_path, - ) + get_via_http_gate(cid, oid, node, http_request_path) else: - get_via_http_gate_by_attribute( - cid=cid, - attribute=attrs, - node=node, - request_path=http_request_path, - ) + get_via_http_gate_by_attribute(cid, attrs, node, http_request_path) raise AssertionError(f"Expected error on getting object with cid: {cid}") except Exception as err: match = error_pattern.casefold() in str(err).casefold() From 6e951443edbb822e5cc7ac5a4b32b341cb114634 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Tue, 24 Dec 2024 11:16:38 +0300 Subject: [PATCH 49/81] [#342] Remove try-catch from delete 
block Signed-off-by: a.berezin --- .../implementations/container.py | 25 ++++++++----------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py index 86cac26..75af00c 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py @@ -181,20 +181,17 @@ class ContainerOperations(interfaces.ContainerInterface): force: bool = False, trace: bool = False, ): - try: - return self.cli.container.delete( - rpc_endpoint=endpoint, - cid=cid, - address=address, - await_mode=await_mode, - session=session, - ttl=ttl, - xhdr=xhdr, - force=force, - trace=trace, - ).stdout - except RuntimeError as e: - print(f"Error request:\n{e}") + return self.cli.container.delete( + rpc_endpoint=endpoint, + cid=cid, + address=address, + await_mode=await_mode, + session=session, + ttl=ttl, + xhdr=xhdr, + force=force, + trace=trace, + ).stdout @reporter.step("Get container") def get( From 9e3380d519be5f59279e5530b1e0a84a89286bb8 Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Tue, 10 Dec 2024 15:42:13 +0300 Subject: [PATCH 50/81] [#336] Refine CODEOWNERS settings Signed-off-by: Vitaliy Potyarkin --- CODEOWNERS | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CODEOWNERS b/CODEOWNERS index 4a621d3..519ca42 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1 +1,3 @@ -* @JuliaKovshova @abereziny @d.zayakin @anikeev-yadro @anurindm @ylukoyan @i.niyazov +.* @TrueCloudLab/qa-committers +.forgejo/.* @potyarkin +Makefile @potyarkin From 0a3de927a2cf2c89c7d29f633083ef079f773cbc Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Tue, 10 Dec 2024 11:47:25 +0300 Subject: [PATCH 51/81] [#343] Extend testsuites for PATCH method Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/cli/frostfs_cli/object.py | 3 +- 
.../clients/s3/s3_http_client.py | 28 +++++++++++++++++-- .../storage/dataclasses/ape.py | 1 + 3 files changed, 28 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 0c00563..e536544 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -315,8 +315,9 @@ class FrostfsCliObject(CliCommand): ttl: TTL value in request meta header (default 2) wallet: WIF (NEP-2) string or path to the wallet or binary key xhdr: Dict with request X-Headers + Returns: - (str): ID of patched Object + Command's result. """ return self._execute( "object patch", diff --git a/src/frostfs_testlib/clients/s3/s3_http_client.py b/src/frostfs_testlib/clients/s3/s3_http_client.py index b83e7a8..f6f423d 100644 --- a/src/frostfs_testlib/clients/s3/s3_http_client.py +++ b/src/frostfs_testlib/clients/s3/s3_http_client.py @@ -21,12 +21,16 @@ class S3HttpClient: self, s3gate_endpoint: str, access_key_id: str, secret_access_key: str, profile: str = "default", region: str = "us-east-1" ) -> None: self.http_client = HttpClient() - self.s3gate_endpoint = s3gate_endpoint self.credentials = Credentials(access_key_id, secret_access_key) self.profile = profile self.region = region - self.service = "s3" - self.signature = SigV4Auth(self.credentials, self.service, self.region) + + self.iam_endpoint: str = None + self.s3gate_endpoint: str = None + self.service: str = None + self.signature: SigV4Auth = None + + self.set_endpoint(s3gate_endpoint) def _to_s3_header(self, header: str) -> dict: replacement_map = { @@ -104,6 +108,24 @@ class S3HttpClient: return data + @reporter.step("Set endpoint S3 to {s3gate_endpoint}") + def set_endpoint(self, s3gate_endpoint: str): + if self.s3gate_endpoint == s3gate_endpoint: + return + + self.s3gate_endpoint = s3gate_endpoint + self.service = "s3" + self.signature = SigV4Auth(self.credentials, self.service, self.region) + + 
@reporter.step("Set endpoint IAM to {iam_endpoint}") + def set_iam_endpoint(self, iam_endpoint: str): + if self.iam_endpoint == iam_endpoint: + return + + self.iam_endpoint = iam_endpoint + self.service = "iam" + self.signature = SigV4Auth(self.credentials, self.service, self.region) + @reporter.step("Patch object S3") def patch_object( self, diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py index b7b5dfc..1199435 100644 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -52,6 +52,7 @@ class Operations: SEARCH_OBJECT = "SearchObject" HEAD_OBJECT = "HeadObject" PUT_OBJECT = "PutObject" + PATCH_OBJECT = "PatchObject" class Verb(HumanReadableEnum): From 6fe7fef44b100b976c5a72aad76477a277975b05 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 25 Dec 2024 19:25:14 +0300 Subject: [PATCH 52/81] [#344] Update ifaces Signed-off-by: a.berezin --- src/frostfs_testlib/cli/netmap_parser.py | 4 ++-- src/frostfs_testlib/steps/cli/object.py | 6 +++++- .../storage/controllers/cluster_state_controller.py | 8 +++++--- .../storage/grpc_operations/implementations/chunks.py | 6 +++--- .../storage/grpc_operations/implementations/object.py | 3 ++- 5 files changed, 17 insertions(+), 10 deletions(-) diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py index 23ac4da..db6f55f 100644 --- a/src/frostfs_testlib/cli/netmap_parser.py +++ b/src/frostfs_testlib/cli/netmap_parser.py @@ -1,7 +1,7 @@ import re from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo, NodeStatus +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeNetInfo, NodeNetmapInfo, NodeStatus class NetmapParser: @@ -85,7 +85,7 @@ class NetmapParser: @staticmethod def snapshot_one_node(output: str, cluster_node: ClusterNode) -> NodeNetmapInfo 
| None: snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output) - snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.host_ip] + snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.get_interface(Interfaces.MGMT)] if not snapshot_node: return None return snapshot_node[0] diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index f28de06..7f8391d 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -12,6 +12,7 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell import Shell from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import wait_for_success from frostfs_testlib.utils import json_utils @@ -752,7 +753,10 @@ def get_object_nodes( ] object_nodes = [ - cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip + cluster_node + for netmap_node in netmap_nodes + for cluster_node in cluster.cluster_nodes + if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT) ] return object_nodes diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 67e4d60..3a10ded 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -19,7 +19,7 @@ from frostfs_testlib.steps.node_management import include_node_to_network_map, r from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode from 
frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeStatus +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeStatus from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import parallel from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success @@ -454,9 +454,11 @@ class ClusterStateController: if not checker_node: checker_node = cluster_node netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(checker_node.storage_node.get_rpc_endpoint()).stdout) - netmap = [node for node in netmap if cluster_node.host_ip == node.node] + netmap = [node for node in netmap if cluster_node.get_interface(Interfaces.MGMT) == node.node] if status == NodeStatus.OFFLINE: - assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline" + assert ( + cluster_node.get_interface(Interfaces.MGMT) not in netmap + ), f"{cluster_node.get_interface(Interfaces.MGMT)} not in Offline" else: assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'" diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py index 7f3161c..ad45855 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -6,7 +6,7 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher -from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, 
NodeNetmapInfo +from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, Interfaces, NodeNetmapInfo from frostfs_testlib.storage.grpc_operations import interfaces from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils.cli_utils import parse_netmap_output @@ -30,7 +30,7 @@ class ChunksOperations(interfaces.ChunksInterface): result = [] for node_info in netmap: for cluster_node in cluster.cluster_nodes: - if node_info.node == cluster_node.host_ip: + if node_info.node == cluster_node.get_interface(Interfaces.MGMT): result.append(cluster_node) return result @@ -40,7 +40,7 @@ class ChunksOperations(interfaces.ChunksInterface): for node_info in netmap: if node_info.node_id in chunk.confirmed_nodes: for cluster_node in cluster.cluster_nodes: - if cluster_node.host_ip == node_info.node: + if cluster_node.get_interface(Interfaces.MGMT) == node_info.node: return (cluster_node, node_info) @wait_for_success(300, 5, fail_testcase=None) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py index f31f223..be8a470 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py @@ -11,6 +11,7 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell.interfaces import CommandResult from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.grpc_operations import interfaces from frostfs_testlib.storage.grpc_operations.implementations.chunks import ChunksOperations from frostfs_testlib.testing.test_control import wait_for_success @@ -674,7 +675,7 @@ class ObjectOperations(interfaces.ObjectInterface): cluster_node for netmap_node in 
netmap_nodes for cluster_node in cluster.cluster_nodes - if netmap_node.node == cluster_node.host_ip + if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT) ] return object_nodes From 974836f1bd91a3fc567b7d64b853f051e53d7cec Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 13 Jan 2025 12:58:29 +0300 Subject: [PATCH 53/81] [#346] Added correct exception in Chunks parse Signed-off-by: Dmitriy Zayakin --- .../storage/grpc_operations/implementations/chunks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py index ad45855..0d787e2 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -161,5 +161,5 @@ class ChunksOperations(interfaces.ChunksInterface): def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]: parse_result = json.loads(object_nodes) if parse_result.get("errors"): - raise parse_result["errors"] + raise RuntimeError(", ".join(parse_result["errors"])) return [Chunk(**chunk) for chunk in parse_result["data_objects"]] From 5a291c5b7f9374a7f9c8b479158024e73459616d Mon Sep 17 00:00:00 2001 From: "m.malygina" Date: Mon, 13 Jan 2025 16:32:47 +0300 Subject: [PATCH 54/81] [#347] remove stderr check Signed-off-by: m.malygina --- src/frostfs_testlib/processes/remote_process.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py index 5624940..071675a 100644 --- a/src/frostfs_testlib/processes/remote_process.py +++ b/src/frostfs_testlib/processes/remote_process.py @@ -193,7 +193,7 @@ class RemoteProcess: ) if "No such file or directory" in terminal.stderr: return None - elif terminal.stderr or terminal.return_code != 0: + elif terminal.return_code != 0: raise 
AssertionError(f"cat process {file} was not successful: {terminal.stderr}") return terminal.stdout From daf186690beff8d4f8bafbbdfa7aedd1c458317d Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 10 Jan 2025 14:29:03 +0300 Subject: [PATCH 55/81] [#345] Fix curl request generation Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/clients/http/http_client.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/clients/http/http_client.py b/src/frostfs_testlib/clients/http/http_client.py index c3e5fae..aebd5ef 100644 --- a/src/frostfs_testlib/clients/http/http_client.py +++ b/src/frostfs_testlib/clients/http/http_client.py @@ -134,9 +134,10 @@ class HttpClient: @classmethod def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str: - headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items()) - data = f" -d '{data}'" if data else "" + excluded_headers = {"Accept-Encoding", "Connection", "User-Agent", "Content-Length"} + headers = " ".join(f"-H '{header.title()}: {value}'" for header, value in headers.items() if header.title() not in excluded_headers) + data = f" -d '{data}'" if data else "" for name, path in files.items(): data += f' -F "{name}=@{path}"' From 80dd8d0b169dbbbd875c03b753f119ad2fce382a Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Wed, 15 Jan 2025 16:31:54 +0300 Subject: [PATCH 56/81] [#348] Fixed check of fields in S3 aws/boto3 methods related to policies Signed-off-by: y.lukoyanova --- src/frostfs_testlib/clients/s3/aws_cli_client.py | 12 ++++++------ src/frostfs_testlib/clients/s3/boto3_client.py | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py index 3496b2b..accc289 100644 --- a/src/frostfs_testlib/clients/s3/aws_cli_client.py +++ 
b/src/frostfs_testlib/clients/s3/aws_cli_client.py @@ -1227,7 +1227,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" return response @@ -1239,7 +1239,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" return response @@ -1264,7 +1264,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" return response @@ -1276,7 +1276,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert response.get("Groups"), f"Expected Groups in response:\n{response}" + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" return response @@ -1288,7 +1288,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert response.get("Groups"), f"Expected Groups in response:\n{response}" + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" return response @@ -1324,7 +1324,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in 
response:\n{response}" return response diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index 53e7ffa..890b4e9 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -1091,7 +1091,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" return response @reporter.step("Lists all managed policies that are attached to the specified IAM user") @@ -1102,7 +1102,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" return response @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") @@ -1127,7 +1127,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" return response @reporter.step("Lists the IAM groups") @@ -1137,7 +1137,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert response.get("Groups"), f"Expected Groups in response:\n{response}" + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" return response @reporter.step("Lists the IAM groups that the specified IAM user belongs to") @@ -1148,7 +1148,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert 
response.get("Groups"), f"Expected Groups in response:\n{response}" + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" return response @reporter.step("Lists all the managed policies that are available in your AWS account") @@ -1180,7 +1180,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" return response @reporter.step("Lists the IAM users") From aed20e02accb3656ebf2b480fa7b884de6768f7d Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Fri, 17 Jan 2025 17:37:51 +0300 Subject: [PATCH 57/81] [#349] Fixed hook pytest-collect-modifyitems Signed-off-by: Ilyas Niyazov --- src/frostfs_testlib/__init__.py | 2 +- src/frostfs_testlib/hooks.py | 19 +++++++++++++++++-- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index 1ceb972..e557a79 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1,4 +1,4 @@ __version__ = "2.0.1" from .fixtures import configure_testlib, hosting, temp_directory -from .hooks import pytest_collection_modifyitems +from .hooks import pytest_add_frostfs_marker, pytest_collection_modifyitems diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py index 6830e78..1ada660 100644 --- a/src/frostfs_testlib/hooks.py +++ b/src/frostfs_testlib/hooks.py @@ -1,8 +1,8 @@ import pytest -@pytest.hookimpl -def pytest_collection_modifyitems(items: list[pytest.Item]): +@pytest.hookimpl(specname="pytest_collection_modifyitems") +def pytest_add_frostfs_marker(items: list[pytest.Item]): # All tests which reside in frostfs nodeid are granted with frostfs marker, excluding # nodeid = full path of the test # 1. 
plugins @@ -11,3 +11,18 @@ def pytest_collection_modifyitems(items: list[pytest.Item]): location = item.location[0] if "frostfs" in location and "plugin" not in location and "testlib" not in location: item.add_marker("frostfs") + + +# pytest hook. Do not rename +@pytest.hookimpl(trylast=True) +def pytest_collection_modifyitems(items: list[pytest.Item]): + # Change order of tests based on @pytest.mark.order() marker + def order(item: pytest.Item) -> int: + order_marker = item.get_closest_marker("order") + if order_marker and (len(order_marker.args) != 1 or not isinstance(order_marker.args[0], int)): + raise RuntimeError("Incorrect usage of pytest.mark.order") + + order_value = order_marker.args[0] if order_marker else 0 + return order_value + + items.sort(key=lambda item: order(item)) From 0015ea7f93a1a102cd08fbbd5276bc9ca508c620 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Thu, 23 Jan 2025 17:46:47 +0300 Subject: [PATCH 58/81] [#350] Add ape rule for load config Signed-off-by: a.berezin --- src/frostfs_testlib/load/load_config.py | 4 ++- tests/test_load_config.py | 39 +++++++++++++++++++++++-- 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 15103e0..3830203 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -182,8 +182,10 @@ class Preset(MetaConfig): pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False) # Workers count for preset workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False) - # Acl for container/buckets + # TODO: Deprecated. 
Acl for container/buckets acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False) + # APE rule for containers instead of deprecated ACL + rule: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "rule", None, False, formatter=force_list) # ------ GRPC ------ # Amount of containers which should be created diff --git a/tests/test_load_config.py b/tests/test_load_config.py index 883b1f2..fbeb587 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -6,10 +6,7 @@ import pytest from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType, Preset, ReadFrom from frostfs_testlib.load.runners import DefaultRunner from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME -from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController -from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode -from frostfs_testlib.storage.dataclasses.node_base import NodeBase @dataclass @@ -129,6 +126,8 @@ class TestLoadConfig: "--size '11'", "--acl 'acl'", "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -161,6 +160,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '11'", "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -317,6 +318,8 @@ class TestLoadConfig: "--no-verify-ssl", "--size '11'", "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -350,6 +353,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '11'", "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -415,6 +420,26 @@ class 
TestLoadConfig: self._check_preset_params(load_params, params) + @pytest.mark.parametrize( + "load_type, input, value, params", + [ + (LoadType.gRPC, ["A C ", " B"], ["A C", "B"], [f"--rule 'A C' --rule 'B'"]), + (LoadType.gRPC, " A ", ["A"], ["--rule 'A'"]), + (LoadType.gRPC, " A , B ", ["A , B"], ["--rule 'A , B'"]), + (LoadType.gRPC, [" A", "B "], ["A", "B"], ["--rule 'A' --rule 'B'"]), + (LoadType.gRPC, None, None, []), + (LoadType.S3, ["A C ", " B"], ["A C", "B"], []), + (LoadType.S3, None, None, []), + ], + ) + def test_ape_list_parsing_formatter(self, load_type, input, value, params): + load_params = LoadParams(load_type) + load_params.preset = Preset() + load_params.preset.rule = input + assert load_params.preset.rule == value + + self._check_preset_params(load_params, params) + @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True) def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): expected_env_vars = { @@ -444,6 +469,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", + "--retry '0'", + "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -475,6 +502,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", + "--retry '0'", + "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -582,6 +611,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", + "--retry '0'", + "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -613,6 +644,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", + "--retry '0'", + "--rule ''", "--out ''", "--workers '0'", "--containers '0'", From ace9564243b8e7c4740c296dcfe0f55a06e719cd Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Thu, 30 Jan 2025 11:16:23 +0300 Subject: [PATCH 59/81] [#352] Fix versions parsing Signed-off-by: Kirill Sosnovskikh --- 
src/frostfs_testlib/utils/version_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index 490abb0..0676085 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -64,7 +64,7 @@ def parallel_binary_verions(host: Host) -> dict[str, str]: try: result = shell.exec(f"{binary_path} {binary['param']}") version = parse_version(result.stdout) or parse_version(result.stderr) or "Unknown" - versions_at_host[binary_name] = version + versions_at_host[binary_name] = version.strip() except Exception as exc: logger.error(f"Cannot get version for {binary_path} because of\n{exc}") versions_at_host[binary_name] = "Unknown" From b44705eb2fd23ca0db313b07e8b5616367ce0d8f Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 30 Jan 2025 14:38:22 +0300 Subject: [PATCH 60/81] [#353] Added Netmap command for CliWrapper Signed-off-by: Dmitriy Zayakin --- requirements.txt | 1 + src/frostfs_testlib/cli/frostfs_cli/netmap.py | 4 + src/frostfs_testlib/cli/netmap_parser.py | 29 +- .../dataclasses/storage_object_info.py | 36 +- .../grpc_operations/client_wrappers.py | 12 +- .../implementations/__init__.py | 4 + .../grpc_operations/implementations/netmap.py | 171 +++++++ .../storage/grpc_operations/interfaces.py | 424 ------------------ .../grpc_operations/interfaces/__init__.py | 4 + .../grpc_operations/interfaces/chunks.py | 79 ++++ .../grpc_operations/interfaces/container.py | 125 ++++++ .../grpc_operations/interfaces/netmap.py | 89 ++++ .../grpc_operations/interfaces/object.py | 223 +++++++++ .../grpc_operations/interfaces_wrapper.py | 10 + 14 files changed, 770 insertions(+), 441 deletions(-) create mode 100644 src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py delete mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces.py create mode 100644 
src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces/container.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces/object.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py diff --git a/requirements.txt b/requirements.txt index e012366..a0bcc11 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,6 +10,7 @@ tenacity==8.0.1 pytest==7.1.2 boto3==1.35.30 boto3-stubs[essential]==1.35.30 +pydantic==2.10.6 # Dev dependencies black==22.8.0 diff --git a/src/frostfs_testlib/cli/frostfs_cli/netmap.py b/src/frostfs_testlib/cli/frostfs_cli/netmap.py index d219940..cd197d3 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/netmap.py +++ b/src/frostfs_testlib/cli/frostfs_cli/netmap.py @@ -12,6 +12,7 @@ class FrostfsCliNetmap(CliCommand): address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -42,6 +43,7 @@ class FrostfsCliNetmap(CliCommand): address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -73,6 +75,7 @@ class FrostfsCliNetmap(CliCommand): generate_key: bool = False, json: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -104,6 +107,7 @@ class FrostfsCliNetmap(CliCommand): address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> 
CommandResult: diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py index db6f55f..2c97c3a 100644 --- a/src/frostfs_testlib/cli/netmap_parser.py +++ b/src/frostfs_testlib/cli/netmap_parser.py @@ -1,7 +1,7 @@ import re from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeNetInfo, NodeNetmapInfo, NodeStatus +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeInfo, NodeNetInfo, NodeNetmapInfo, NodeStatus class NetmapParser: @@ -20,8 +20,6 @@ class NetmapParser: "withdrawal_fee": r"Withdrawal fee: (?P\d+)", "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?Ptrue|false)", "maintenance_mode_allowed": r"Maintenance mode allowed: (?Ptrue|false)", - "eigen_trust_alpha": r"EigenTrustAlpha: (?P\d+\w+$)", - "eigen_trust_iterations": r"EigenTrustIterations: (?P\d+)", } parse_result = {} @@ -64,7 +62,7 @@ class NetmapParser: for node in netmap_nodes: for key, regex in regexes.items(): search_result = re.search(regex, node, flags=re.MULTILINE) - if search_result == None: + if search_result is None: result_netmap[key] = None continue if key == "node_data_ips": @@ -83,9 +81,22 @@ class NetmapParser: return dataclasses_netmap @staticmethod - def snapshot_one_node(output: str, cluster_node: ClusterNode) -> NodeNetmapInfo | None: + def snapshot_one_node(output: str, rpc_endpoint: str) -> NodeNetmapInfo | None: snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output) - snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.get_interface(Interfaces.MGMT)] - if not snapshot_node: - return None - return snapshot_node[0] + for snapshot in snapshot_nodes: + for endpoint in snapshot.external_address: + if rpc_endpoint.split(":")[0] in endpoint: + return snapshot + + @staticmethod + def node_info(output: dict) -> NodeNetmapInfo: + data_dict = {"attributes": {}} + + for key, value in 
output.items(): + if key != "attributes": + data_dict[key] = value + + for attribute in output["attributes"]: + data_dict["attributes"][attribute["key"]] = attribute["value"] + + return NodeInfo(**data_dict) diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index 55a8388..4c303fc 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -1,6 +1,9 @@ +import re from dataclasses import dataclass from typing import Optional +from pydantic import BaseModel, Field, field_validator + from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.readable import HumanReadableEnum @@ -75,8 +78,37 @@ class NodeNetInfo: withdrawal_fee: str = None homomorphic_hashing_disabled: str = None maintenance_mode_allowed: str = None - eigen_trust_alpha: str = None - eigen_trust_iterations: str = None + + +class Attributes(BaseModel): + cluster_name: str = Field(alias="ClusterName") + continent: str = Field(alias="Continent") + country: str = Field(alias="Country") + country_code: str = Field(alias="CountryCode") + external_addr: list[str] = Field(alias="ExternalAddr") + location: str = Field(alias="Location") + node: str = Field(alias="Node") + subdiv: str = Field(alias="SubDiv") + subdiv_code: str = Field(alias="SubDivCode") + un_locode: str = Field(alias="UN-LOCODE") + role: str = Field(alias="role") + + @field_validator("external_addr", mode="before") + @classmethod + def convert_external_addr(cls, value: str) -> list[str]: + return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", value)] + + +class NodeInfo(BaseModel): + public_key: str = Field(alias="publicKey") + addresses: list[str] = Field(alias="addresses") + state: str = Field(alias="state") + attributes: Attributes = Field(alias="attributes") + + @field_validator("addresses", 
mode="before") + @classmethod + def convert_external_addr(cls, value: str) -> list[str]: + return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", ",".join(value))] @dataclass diff --git a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py index 8cef23b..c1e3a31 100644 --- a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py +++ b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py @@ -1,14 +1,14 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.storage.grpc_operations import interfaces -from frostfs_testlib.storage.grpc_operations.implementations import container, object +from frostfs_testlib.storage.grpc_operations import implementations, interfaces, interfaces_wrapper -class CliClientWrapper(interfaces.GrpcClientWrapper): +class CliClientWrapper(interfaces_wrapper.GrpcClientWrapper): def __init__(self, cli: FrostfsCli) -> None: self.cli = cli - self.object: interfaces.ObjectInterface = object.ObjectOperations(self.cli) - self.container: interfaces.ContainerInterface = container.ContainerOperations(self.cli) + self.object: interfaces.ObjectInterface = implementations.ObjectOperations(self.cli) + self.container: interfaces.ContainerInterface = implementations.ContainerOperations(self.cli) + self.netmap: interfaces.NetmapInterface = implementations.NetmapOperations(self.cli) -class RpcClientWrapper(interfaces.GrpcClientWrapper): +class RpcClientWrapper(interfaces_wrapper.GrpcClientWrapper): pass # The next series diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py index e69de29..18e8ae5 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py @@ -0,0 +1,4 @@ +from .chunks import 
ChunksOperations +from .container import ContainerOperations +from .netmap import NetmapOperations +from .object import ObjectOperations diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py new file mode 100644 index 0000000..905171b --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py @@ -0,0 +1,171 @@ +import json as module_json +from typing import List, Optional + +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.cli.netmap_parser import NetmapParser +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo + +from .. import interfaces + + +class NetmapOperations(interfaces.NetmapInterface): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + + def epoch( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> int: + """ + Get current epoch number. + """ + output = ( + self.cli.netmap.epoch( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return int(output) + + def netinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> NodeNetInfo: + """ + Get target node info. 
+ """ + output = ( + self.cli.netmap.netinfo( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.netinfo(output) + + def nodeinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + json: bool = True, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> NodeNetmapInfo: + """ + Get target node info. + """ + output = ( + self.cli.netmap.nodeinfo( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + json=json, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.node_info(module_json.loads(output)) + + def snapshot( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[NodeNetmapInfo]: + """ + Get target node info. + """ + output = ( + self.cli.netmap.snapshot( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.snapshot_all_nodes(output) + + def snapshot_one_node( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[NodeNetmapInfo]: + """ + Get target one node info. 
+ """ + output = ( + self.cli.netmap.snapshot( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.snapshot_one_node(output, rpc_endpoint) diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces.py b/src/frostfs_testlib/storage/grpc_operations/interfaces.py deleted file mode 100644 index 07fe52f..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces.py +++ /dev/null @@ -1,424 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any, List, Optional - -from frostfs_testlib.shell.interfaces import CommandResult -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.constants import PlacementRule -from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo -from frostfs_testlib.utils import file_utils - - -class ChunksInterface(ABC): - @abstractmethod - def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: - pass - - @abstractmethod - def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: - pass - - @abstractmethod - def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: - pass - - @abstractmethod - def get_all( - self, - rpc_endpoint: str, - cid: str, - oid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> list[Chunk]: - pass - - @abstractmethod - def get_parity( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: 
Optional[str] = None, - generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> Chunk: - pass - - @abstractmethod - def get_first_data( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> Chunk: - pass - - -class ObjectInterface(ABC): - def __init__(self) -> None: - self.chunks: ChunksInterface - - @abstractmethod - def delete( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def get( - self, - cid: str, - oid: str, - endpoint: str, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> file_utils.TestFile: - pass - - @abstractmethod - def get_from_random_node( - self, - cid: str, - oid: str, - cluster: Cluster, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def hash( - self, - endpoint: str, - cid: str, - oid: str, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - range: Optional[str] = None, - salt: Optional[str] = None, - ttl: Optional[int] = None, - session: 
Optional[str] = None, - hash_type: Optional[str] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def head( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - json_output: bool = True, - is_raw: bool = False, - is_direct: bool = False, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult | Any: - pass - - @abstractmethod - def lock( - self, - cid: str, - oid: str, - endpoint: str, - lifetime: Optional[int] = None, - expire_at: Optional[int] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def put( - self, - path: str, - cid: str, - endpoint: str, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def patch( - self, - cid: str, - oid: str, - endpoint: str, - ranges: Optional[list[str]] = None, - payloads: Optional[list[str]] = None, - new_attrs: Optional[str] = None, - replace_attrs: bool = False, - bearer: Optional[str] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - trace: bool = False, - ) -> str: - pass - - @abstractmethod - def put_to_random_node( - self, - path: str, - cid: str, - cluster: Cluster, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def range( - 
self, - cid: str, - oid: str, - range_cut: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> tuple[file_utils.TestFile, bytes]: - pass - - @abstractmethod - def search( - self, - cid: str, - endpoint: str, - bearer: str = "", - oid: Optional[str] = None, - filters: Optional[dict] = None, - expected_objects_list: Optional[list] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - phy: bool = False, - root: bool = False, - timeout: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - ttl: Optional[int] = None, - ) -> List: - pass - - @abstractmethod - def nodes( - self, - cluster: Cluster, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = None, - ) -> List[ClusterNode]: - pass - - @abstractmethod - def parts( - self, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = None, - ) -> List[str]: - pass - - -class ContainerInterface(ABC): - @abstractmethod - def create( - self, - endpoint: str, - nns_zone: Optional[str] = None, - nns_name: Optional[str] = None, - address: Optional[str] = None, - attributes: Optional[dict] = None, - basic_acl: Optional[str] = None, - await_mode: bool = False, - disable_timestamp: bool = False, - force: bool = False, - trace: bool = False, - name: Optional[str] = None, - nonce: Optional[str] = None, - policy: Optional[str] = None, - session: Optional[str] = None, - subnet: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - """ - Create a new container and register it in the FrostFS. 
- It will be stored in the sidechain when the Inner Ring accepts it. - """ - raise NotImplementedError("No implemethed method create") - - @abstractmethod - def delete( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - await_mode: bool = False, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - force: bool = False, - trace: bool = False, - ) -> List[str]: - """ - Delete an existing container. - Only the owner of the container has permission to remove the container. - """ - raise NotImplementedError("No implemethed method delete") - - @abstractmethod - def get( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - to: Optional[str] = None, - json_mode: bool = True, - trace: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[str]: - """Get container field info.""" - raise NotImplementedError("No implemethed method get") - - @abstractmethod - def get_eacl( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - json_mode: bool = True, - trace: bool = False, - to: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[str]: - """Get extended ACL table of container.""" - raise NotImplementedError("No implemethed method get-eacl") - - @abstractmethod - def list( - self, - endpoint: str, - name: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - owner: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - **params, - ) -> List[str]: - """List all created containers.""" - raise NotImplementedError("No implemethed method list") - - 
@abstractmethod - def nodes( - self, - endpoint: str, - cid: str, - cluster: Cluster, - address: Optional[str] = None, - ttl: Optional[int] = None, - from_file: Optional[str] = None, - trace: bool = False, - short: Optional[bool] = True, - xhdr: Optional[dict] = None, - generate_key: Optional[bool] = None, - timeout: Optional[str] = None, - ) -> List[ClusterNode]: - """Show the nodes participating in the container in the current epoch.""" - raise NotImplementedError("No implemethed method nodes") - - -class GrpcClientWrapper(ABC): - def __init__(self) -> None: - self.object: ObjectInterface - self.container: ContainerInterface diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py new file mode 100644 index 0000000..17b3e9c --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py @@ -0,0 +1,4 @@ +from .chunks import ChunksInterface +from .container import ContainerInterface +from .netmap import NetmapInterface +from .object import ObjectInterface diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py new file mode 100644 index 0000000..986b938 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py @@ -0,0 +1,79 @@ +from abc import ABC, abstractmethod +from typing import Optional + +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo + + +class ChunksInterface(ABC): + @abstractmethod + def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: + pass + + @abstractmethod + def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: + pass + + @abstractmethod + def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: + 
pass + + @abstractmethod + def get_all( + self, + rpc_endpoint: str, + cid: str, + oid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> list[Chunk]: + pass + + @abstractmethod + def get_parity( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + pass + + @abstractmethod + def get_first_data( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py new file mode 100644 index 0000000..d5e3eeb --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py @@ -0,0 +1,125 @@ +from abc import ABC, abstractmethod +from typing import List, Optional + +from frostfs_testlib.storage.cluster import Cluster, ClusterNode + + +class ContainerInterface(ABC): + @abstractmethod + def create( + self, + endpoint: str, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, + address: 
Optional[str] = None, + attributes: Optional[dict] = None, + basic_acl: Optional[str] = None, + await_mode: bool = False, + disable_timestamp: bool = False, + force: bool = False, + trace: bool = False, + name: Optional[str] = None, + nonce: Optional[str] = None, + policy: Optional[str] = None, + session: Optional[str] = None, + subnet: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> str: + """ + Create a new container and register it in the FrostFS. + It will be stored in the sidechain when the Inner Ring accepts it. + """ + raise NotImplementedError("No implemethed method create") + + @abstractmethod + def delete( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + await_mode: bool = False, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + force: bool = False, + trace: bool = False, + ) -> List[str]: + """ + Delete an existing container. + Only the owner of the container has permission to remove the container. 
+ """ + raise NotImplementedError("No implemethed method delete") + + @abstractmethod + def get( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + to: Optional[str] = None, + json_mode: bool = True, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get container field info.""" + raise NotImplementedError("No implemethed method get") + + @abstractmethod + def get_eacl( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + json_mode: bool = True, + trace: bool = False, + to: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get extended ACL table of container.""" + raise NotImplementedError("No implemethed method get-eacl") + + @abstractmethod + def list( + self, + endpoint: str, + name: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + owner: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + **params, + ) -> List[str]: + """List all created containers.""" + raise NotImplementedError("No implemethed method list") + + @abstractmethod + def nodes( + self, + endpoint: str, + cid: str, + cluster: Cluster, + address: Optional[str] = None, + ttl: Optional[int] = None, + from_file: Optional[str] = None, + trace: bool = False, + short: Optional[bool] = True, + xhdr: Optional[dict] = None, + generate_key: Optional[bool] = None, + timeout: Optional[str] = None, + ) -> List[ClusterNode]: + """Show the nodes participating in the container in the current epoch.""" + raise NotImplementedError("No implemethed method nodes") diff --git 
a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py new file mode 100644 index 0000000..3f0a341 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py @@ -0,0 +1,89 @@ +from abc import ABC, abstractmethod +from typing import List, Optional + +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo + + +class NetmapInterface(ABC): + @abstractmethod + def epoch( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = False, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> int: + """ + Get current epoch number. + """ + raise NotImplementedError("No implemethed method epoch") + + @abstractmethod + def netinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> NodeNetInfo: + """ + Get target node info. + """ + raise NotImplementedError("No implemethed method netinfo") + + @abstractmethod + def nodeinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> NodeNetmapInfo: + """ + Get target node info. + """ + raise NotImplementedError("No implemethed method nodeinfo") + + @abstractmethod + def snapshot( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[NodeNetmapInfo]: + """ + Get target node info. 
+ """ + raise NotImplementedError("No implemethed method snapshot") + + @abstractmethod + def snapshot_one_node( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[NodeNetmapInfo]: + """ + Get target one node info. + """ + raise NotImplementedError("No implemethed method snapshot") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py new file mode 100644 index 0000000..550c461 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py @@ -0,0 +1,223 @@ +from abc import ABC, abstractmethod +from typing import Any, List, Optional + +from frostfs_testlib.shell.interfaces import CommandResult +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.utils import file_utils + +from .chunks import ChunksInterface + + +class ObjectInterface(ABC): + def __init__(self) -> None: + self.chunks: ChunksInterface + + @abstractmethod + def delete( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def get( + self, + cid: str, + oid: str, + endpoint: str, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> file_utils.TestFile: + pass + + @abstractmethod + def get_from_random_node( + self, + cid: str, + oid: str, + cluster: Cluster, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod 
+ def hash( + self, + endpoint: str, + cid: str, + oid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + range: Optional[str] = None, + salt: Optional[str] = None, + ttl: Optional[int] = None, + session: Optional[str] = None, + hash_type: Optional[str] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def head( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + json_output: bool = True, + is_raw: bool = False, + is_direct: bool = False, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult | Any: + pass + + @abstractmethod + def lock( + self, + cid: str, + oid: str, + endpoint: str, + lifetime: Optional[int] = None, + expire_at: Optional[int] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def put( + self, + path: str, + cid: str, + endpoint: str, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def patch( + self, + cid: str, + oid: str, + endpoint: str, + ranges: Optional[list[str]] = None, + payloads: Optional[list[str]] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + bearer: Optional[str] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + trace: bool = False, + ) -> str: + pass + + @abstractmethod + def put_to_random_node( + self, + path: str, + cid: str, + cluster: Cluster, + bearer: Optional[str] = None, + copies_number: 
Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def range( + self, + cid: str, + oid: str, + range_cut: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> tuple[file_utils.TestFile, bytes]: + pass + + @abstractmethod + def search( + self, + cid: str, + endpoint: str, + bearer: str = "", + oid: Optional[str] = None, + filters: Optional[dict] = None, + expected_objects_list: Optional[list] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + phy: bool = False, + root: bool = False, + timeout: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + ttl: Optional[int] = None, + ) -> List: + pass + + @abstractmethod + def nodes( + self, + cluster: Cluster, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = None, + ) -> List[ClusterNode]: + pass + + @abstractmethod + def parts( + self, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = None, + ) -> List[str]: + pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py b/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py new file mode 100644 index 0000000..6574012 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py @@ -0,0 +1,10 @@ +from abc import ABC + +from . 
import interfaces + + +class GrpcClientWrapper(ABC): + def __init__(self) -> None: + self.object: interfaces.ObjectInterface + self.container: interfaces.ContainerInterface + self.netmap: interfaces.NetmapInterface From 87afc4b58c070d35643f95efd0e5db27eeb6fab6 Mon Sep 17 00:00:00 2001 From: Dmitry Anurin Date: Tue, 4 Feb 2025 10:03:58 +0300 Subject: [PATCH 61/81] [#356] Added pprof endpoint and working dir to service attributes Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/storage/constants.py | 2 ++ src/frostfs_testlib/storage/dataclasses/node_base.py | 10 ++++++++++ 2 files changed, 12 insertions(+) diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 39c6b66..2e49208 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -5,6 +5,7 @@ class ConfigAttributes: WALLET_CONFIG = "wallet_config" CONFIG_DIR = "service_config_dir" CONFIG_PATH = "config_path" + WORKING_DIR = "working_dir" SHARD_CONFIG_PATH = "shard_config_path" LOGGER_CONFIG_PATH = "logger_config_path" LOCAL_WALLET_PATH = "local_wallet_path" @@ -15,6 +16,7 @@ class ConfigAttributes: ENDPOINT_DATA_0_NS = "endpoint_data0_namespace" ENDPOINT_INTERNAL = "endpoint_internal0" ENDPOINT_PROMETHEUS = "endpoint_prometheus" + ENDPOINT_PPROF = "endpoint_pprof" CONTROL_ENDPOINT = "control_endpoint" UN_LOCODE = "un_locode" diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 180877d..5c8b723 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -82,6 +82,9 @@ class NodeBase(HumanReadableABC): def get_metrics_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS) + def get_pprof_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_PPROF) + def stop_service(self, mask: bool = True): if mask: with 
reporter.step(f"Mask {self.name} service on {self.host.config.address}"): @@ -144,6 +147,13 @@ class NodeBase(HumanReadableABC): else None ) + def get_working_dir_path(self) -> Optional[str]: + """ + Returns working directory path located on remote host + """ + config_attributes = self.host.get_service_config(self.name) + return self._get_attribute(ConfigAttributes.WORKING_DIR) if ConfigAttributes.WORKING_DIR in config_attributes.attributes else None + @property def config_dir(self) -> str: return self._get_attribute(ConfigAttributes.CONFIG_DIR) From e9bc36b3d3063043e2b754fbccbde53e93e3785a Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 4 Feb 2025 16:39:34 +0300 Subject: [PATCH 62/81] [#355] Change CSC time methods Signed-off-by: Dmitriy Zayakin --- .../controllers/cluster_state_controller.py | 34 +++++-------------- 1 file changed, 8 insertions(+), 26 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 3a10ded..6370033 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,7 +1,7 @@ -import datetime import itertools import logging import time +from datetime import datetime, timezone from typing import TypeVar import frostfs_testlib.resources.optionals as optionals @@ -390,31 +390,23 @@ class ClusterStateController: @reporter.step("Get node time") def get_node_date(self, node: ClusterNode) -> datetime: shell = node.host.get_shell() - return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z") + return datetime.strptime(shell.exec('date +"%Y-%m-%d %H:%M:%S"').stdout.strip(), "%Y-%m-%d %H:%M:%S") @reporter.step("Set node time to {in_date}") def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: shell = node.host.get_shell() - shell.exec(f"date -s 
@{time.mktime(in_date.timetuple())}") - shell.exec("hwclock --systohc") + in_date_frmt = in_date.strftime("%Y-%m-%d %H:%M:%S") + shell.exec(f"timedatectl set-time '{in_date_frmt}'") node_time = self.get_node_date(node) with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): - assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1) + assert (node_time - in_date).total_seconds() < 60 - @reporter.step(f"Restore time") + @reporter.step("Restore time") def restore_node_date(self, node: ClusterNode) -> None: shell = node.host.get_shell() - now_time = datetime.datetime.now(datetime.timezone.utc) + now_time = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S") with reporter.step(f"Set {now_time} time"): - shell.exec(f"date -s @{time.mktime(now_time.timetuple())}") - shell.exec("hwclock --systohc") - - @reporter.step("Change the synchronizer status to {status}") - def set_sync_date_all_nodes(self, status: str): - if status == "active": - parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes) - return - parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes) + shell.exec(f"timedatectl set-time '{now_time}'") @reporter.step("Set MaintenanceModeAllowed - {status}") def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: @@ -500,16 +492,6 @@ class ClusterStateController: frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path) return frostfs_adm, frostfs_cli, frostfs_cli_remote - def _enable_date_synchronizer(self, cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() - shell.exec("timedatectl set-ntp true") - cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 15) - - def _disable_date_synchronizer(self, cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() - shell.exec("timedatectl set-ntp false") - 
cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 15) - def _get_disk_controller(self, node: StorageNode, device: str, mountpoint: str) -> DiskController: disk_controller_id = DiskController.get_id(node, device) if disk_controller_id in self.detached_disks.keys(): From 97b9b5498af883d2dd111aa17b916d2aba36429e Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 21 Feb 2025 16:27:13 +0300 Subject: [PATCH 63/81] [#358] Add minor improvements for convenient work with clients Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/clients/http/__init__.py | 1 + src/frostfs_testlib/clients/s3/__init__.py | 4 ++- .../clients/s3/aws_cli_client.py | 6 +++-- .../clients/s3/boto3_client.py | 20 ++++++-------- src/frostfs_testlib/clients/s3/interfaces.py | 26 ++++++++++++------- .../resources/error_patterns.py | 1 + 6 files changed, 34 insertions(+), 24 deletions(-) diff --git a/src/frostfs_testlib/clients/http/__init__.py b/src/frostfs_testlib/clients/http/__init__.py index e69de29..ab6e2b0 100644 --- a/src/frostfs_testlib/clients/http/__init__.py +++ b/src/frostfs_testlib/clients/http/__init__.py @@ -0,0 +1 @@ +from frostfs_testlib.clients.http.http_client import HttpClient diff --git a/src/frostfs_testlib/clients/s3/__init__.py b/src/frostfs_testlib/clients/s3/__init__.py index 65a3990..5481f48 100644 --- a/src/frostfs_testlib/clients/s3/__init__.py +++ b/src/frostfs_testlib/clients/s3/__init__.py @@ -1 +1,3 @@ -from frostfs_testlib.clients.s3.interfaces import BucketContainerResolver, S3ClientWrapper, VersioningStatus +from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient +from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper +from frostfs_testlib.clients.s3.interfaces import ACL, BucketContainerResolver, S3ClientWrapper, VersioningStatus diff --git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py index accc289..8b2d774 100644 --- 
a/src/frostfs_testlib/clients/s3/aws_cli_client.py +++ b/src/frostfs_testlib/clients/s3/aws_cli_client.py @@ -33,12 +33,14 @@ class AwsCliClient(S3ClientWrapper): self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" ) -> None: self.s3gate_endpoint = s3gate_endpoint + self.iam_endpoint = None + self.access_key_id: str = access_key_id self.secret_access_key: str = secret_access_key self.profile = profile - self.local_shell = LocalShell() self.region = region - self.iam_endpoint = None + + self.local_shell = LocalShell() try: _configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key, region) self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}") diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index 890b4e9..9d9fefe 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -35,26 +35,20 @@ class Boto3ClientWrapper(S3ClientWrapper): def __init__( self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" ) -> None: - self.boto3_client: S3Client = None self.s3gate_endpoint: str = "" + self.boto3_client: S3Client = None - self.boto3_iam_client: S3Client = None self.iam_endpoint: str = "" - + self.boto3_iam_client: S3Client = None self.boto3_sts_client: S3Client = None - self.access_key_id: str = access_key_id - self.secret_access_key: str = secret_access_key + self.access_key_id = access_key_id + self.secret_access_key = secret_access_key self.profile = profile self.region = region self.session = boto3.Session() - self.config = Config( - retries={ - "max_attempts": MAX_REQUEST_ATTEMPTS, - "mode": RETRY_MODE, - } - ) + self.config = Config(retries={"max_attempts": MAX_REQUEST_ATTEMPTS, "mode": RETRY_MODE}) self.set_endpoint(s3gate_endpoint) @@ 
-90,7 +84,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint_url=self.iam_endpoint, verify=False, ) - # since the STS does not have an enpoint, IAM is used + # since the STS does not have an endpoint, IAM is used self.boto3_sts_client = self.session.client( service_name="sts", aws_access_key_id=self.access_key_id, @@ -145,6 +139,7 @@ class Boto3ClientWrapper(S3ClientWrapper): params = {"Bucket": bucket} if object_lock_enabled_for_bucket is not None: params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket}) + if acl is not None: params.update({"ACL": acl}) elif grant_write or grant_read or grant_full_control: @@ -154,6 +149,7 @@ class Boto3ClientWrapper(S3ClientWrapper): params.update({"GrantRead": grant_read}) elif grant_full_control: params.update({"GrantFullControl": grant_full_control}) + if location_constraint: params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) diff --git a/src/frostfs_testlib/clients/s3/interfaces.py b/src/frostfs_testlib/clients/s3/interfaces.py index 7ce9f31..d636182 100644 --- a/src/frostfs_testlib/clients/s3/interfaces.py +++ b/src/frostfs_testlib/clients/s3/interfaces.py @@ -22,15 +22,15 @@ class VersioningStatus(HumanReadableEnum): SUSPENDED = "Suspended" -ACL_COPY = [ - "private", - "public-read", - "public-read-write", - "authenticated-read", - "aws-exec-read", - "bucket-owner-read", - "bucket-owner-full-control", -] +class ACL: + PRIVATE = "private" + PUBLIC_READ = "public-read" + PUBLIC_READ_WRITE = "public-read-write" + AUTHENTICATED_READ = "authenticated-read" + AWS_EXEC_READ = "aws-exec-read" + BUCKET_OWNER_READ = "bucket-owner-read" + BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control" + LOG_DELIVERY_WRITE = "log-delivery-write" class BucketContainerResolver(ABC): @@ -50,6 +50,14 @@ class BucketContainerResolver(ABC): class S3ClientWrapper(HumanReadableABC): + access_key_id: str + secret_access_key: str + profile: str + region: str + + s3gate_endpoint: str + 
iam_endpoint: str + @abstractmethod def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None: pass diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 4c22648..6c0cb14 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -1,5 +1,6 @@ # Regex patterns of status codes of Container service CONTAINER_NOT_FOUND = "code = 3072.*message = container not found" +SUBJECT_NOT_FOUND = "code = 1024.*message = frostfs error: chain/client.*subject not found.*" # Regex patterns of status codes of Object service MALFORMED_REQUEST = "code = 1024.*message = malformed request" From b00d080982804c8c9237a49a606dbf6fc4ef03f1 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Tue, 25 Feb 2025 16:43:34 +0300 Subject: [PATCH 64/81] [#357] Synchronize client and CliCommand timeouts Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/cli/cli_command.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/frostfs_testlib/cli/cli_command.py b/src/frostfs_testlib/cli/cli_command.py index 3600e77..7fccc65 100644 --- a/src/frostfs_testlib/cli/cli_command.py +++ b/src/frostfs_testlib/cli/cli_command.py @@ -24,9 +24,7 @@ class CliCommand: def __init__(self, shell: Shell, cli_exec_path: str, **base_params): self.shell = shell self.cli_exec_path = cli_exec_path - self.__base_params = " ".join( - [f"--{param} {value}" for param, value in base_params.items() if value] - ) + self.__base_params = " ".join([f"--{param} {value}" for param, value in base_params.items() if value]) def _format_command(self, command: str, **params) -> str: param_str = [] @@ -48,9 +46,7 @@ class CliCommand: val_str = str(value_item).replace("'", "\\'") param_str.append(f"--{param} '{val_str}'") elif isinstance(value, dict): - param_str.append( - f'--{param} \'{",".join(f"{key}={val}" 
for key, val in value.items())}\'' - ) + param_str.append(f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'') else: if "'" in str(value): value_str = str(value).replace('"', '\\"') @@ -63,12 +59,18 @@ class CliCommand: return f"{self.cli_exec_path} {self.__base_params} {command or ''} {param_str}" def _execute(self, command: Optional[str], **params) -> CommandResult: - return self.shell.exec(self._format_command(command, **params)) - - def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult: + timeout = int(params["timeout"].rstrip("s")) if params.get("timeout") else None return self.shell.exec( self._format_command(command, **params), - options=CommandOptions( - interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)] + CommandOptions(timeout=timeout), + ) + + def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult: + timeout = int(params["timeout"].rstrip("s")) if params.get("timeout") else None + return self.shell.exec( + self._format_command(command, **params), + CommandOptions( + interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)], + timeout=timeout, ), ) From f1073d214cc300ede89cfd05907039511a1970f0 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Wed, 5 Mar 2025 15:29:35 +0300 Subject: [PATCH 65/81] [#360] Increased timeout for IAM policy attach/detach Signed-off-by: Yaroslava Lukoyanova --- src/frostfs_testlib/clients/s3/aws_cli_client.py | 12 ++++++------ src/frostfs_testlib/clients/s3/boto3_client.py | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py index 8b2d774..a2e3fc7 100644 --- a/src/frostfs_testlib/clients/s3/aws_cli_client.py +++ b/src/frostfs_testlib/clients/s3/aws_cli_client.py @@ -979,7 +979,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile 
{self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @@ -990,7 +990,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @@ -1122,7 +1122,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @@ -1133,7 +1133,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @@ -1352,7 +1352,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @@ -1367,7 +1367,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index 9d9fefe..4157bd6 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -836,7 +836,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @reporter.step("Attaches the specified managed policy to the specified user") @@ -848,7 +848,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, 
profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") @@ -979,7 +979,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @reporter.step("Removes the specified managed policy from the specified user") @@ -991,7 +991,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @reporter.step("Returns a list of IAM users that are in the specified IAM group") @@ -1201,7 +1201,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") @@ -1216,7 +1216,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @reporter.step("Removes the specified user from the specified group") From 0c4e601840d81ceef400e334b3d3bcd8bee4592e Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Mon, 3 Mar 2025 14:54:22 +0300 Subject: [PATCH 66/81] [#359] Override represantation method for Host Signed-off-by: Ilyas Niyazov --- src/frostfs_testlib/hosting/interfaces.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index f58d856..a41161c 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -29,6 +29,9 @@ class Host(ABC): self._service_config_by_name = {service_config.name: service_config for service_config in config.services} 
self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis} + def __repr__(self) -> str: + return self.config.address + @property def config(self) -> HostConfig: """Returns config of the host. From 7d2c92ebc096dc378666dce09d26cfd0a0313d2f Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Fri, 7 Mar 2025 15:18:43 +0300 Subject: [PATCH 67/81] [#361] Move common fixture to testlib Signed-off-by: a.berezin --- src/frostfs_testlib/__init__.py | 2 +- src/frostfs_testlib/fixtures.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index e557a79..4724a8b 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1,4 +1,4 @@ __version__ = "2.0.1" -from .fixtures import configure_testlib, hosting, temp_directory +from .fixtures import configure_testlib, hosting, session_start_time, temp_directory from .hooks import pytest_add_frostfs_marker, pytest_collection_modifyitems diff --git a/src/frostfs_testlib/fixtures.py b/src/frostfs_testlib/fixtures.py index d0f92f2..7d767d2 100644 --- a/src/frostfs_testlib/fixtures.py +++ b/src/frostfs_testlib/fixtures.py @@ -1,5 +1,6 @@ import logging import os +from datetime import datetime from importlib.metadata import entry_points import pytest @@ -11,6 +12,12 @@ from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE from frostfs_testlib.storage import get_service_registry +@pytest.fixture(scope="session", autouse=True) +def session_start_time(): + start_time = datetime.utcnow() + return start_time + + @pytest.fixture(scope="session") def configure_testlib(): reporter.get_reporter().register_handler(reporter.AllureHandler()) From c2af1bba5c300b1bb1758eaa19f687962ef98224 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 7 Mar 2025 18:14:38 +0300 Subject: [PATCH 68/81] [#362] Add functions to change date on nodes in `ClusterStateController` Signed-off-by: Kirill 
Sosnovskikh --- .../controllers/cluster_state_controller.py | 61 +++++++------------ 1 file changed, 22 insertions(+), 39 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 6370033..51aaefb 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -247,23 +247,20 @@ class ClusterStateController: if service_type == StorageNode: self.wait_after_storage_startup() - # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop all storage services on cluster") - def stop_all_storage_services(self, reversed_order: bool = False): - nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + @reporter.step("Restart {service_type} service on {node}") + def restart_service_of_type(self, node: ClusterNode, service_type: ServiceClass): + service = node.service(service_type) + service.restart_service() - for node in nodes: - self.stop_service_of_type(node, StorageNode) - - # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop all S3 gates on cluster") - def stop_all_s3_gates(self, reversed_order: bool = False): - nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + @reporter.step("Restart all {service_type} services") + def restart_services_of_type(self, service_type: type[ServiceClass]): + services = self.cluster.services(service_type) + parallel([service.restart_service for service in services]) - for node in nodes: - self.stop_service_of_type(node, S3Gate) + if service_type == StorageNode: + self.wait_after_storage_startup() # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @@ -277,30 +274,6 @@ class ClusterStateController: def start_storage_service(self, node: ClusterNode): 
self.start_service_of_type(node, StorageNode) - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start stopped storage services") - def start_stopped_storage_services(self): - self.start_stopped_services_of_type(StorageNode) - - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop s3 gate on {node}") - def stop_s3_gate(self, node: ClusterNode, mask: bool = True): - self.stop_service_of_type(node, S3Gate, mask) - - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start s3 gate on {node}") - def start_s3_gate(self, node: ClusterNode): - self.start_service_of_type(node, S3Gate) - - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start stopped S3 gates") - def start_stopped_s3_gates(self): - self.start_stopped_services_of_type(S3Gate) - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Suspend {process_name} service in {node}") def suspend_service(self, process_name: str, node: ClusterNode): @@ -392,19 +365,29 @@ class ClusterStateController: shell = node.host.get_shell() return datetime.strptime(shell.exec('date +"%Y-%m-%d %H:%M:%S"').stdout.strip(), "%Y-%m-%d %H:%M:%S") - @reporter.step("Set node time to {in_date}") + @reporter.step("Set time on nodes in {in_date}") + def change_date_on_all_nodes(self, cluster: Cluster, in_date: datetime) -> None: + parallel(self.change_node_date, cluster.cluster_nodes, in_date=in_date) + + @reporter.step("Set time on {node} to {in_date}") def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: shell = node.host.get_shell() in_date_frmt = in_date.strftime("%Y-%m-%d %H:%M:%S") shell.exec(f"timedatectl set-time '{in_date_frmt}'") node_time = self.get_node_date(node) + with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): assert (node_time - in_date).total_seconds() < 60 - 
@reporter.step("Restore time") + @reporter.step("Restore time on nodes") + def restore_date_on_all_nodes(self, cluster: Cluster) -> None: + parallel(self.restore_node_date, cluster.cluster_nodes) + + @reporter.step("Restore time on {node}") def restore_node_date(self, node: ClusterNode) -> None: shell = node.host.get_shell() now_time = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S") + with reporter.step(f"Set {now_time} time"): shell.exec(f"timedatectl set-time '{now_time}'") From dfb048fe519f6ab72d59453569ead9cf2e93cafa Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Tue, 11 Mar 2025 17:22:13 +0300 Subject: [PATCH 69/81] [#363] Add accounting for timeout inaccuracy between process and cli Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/cli/cli_command.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/cli/cli_command.py b/src/frostfs_testlib/cli/cli_command.py index 7fccc65..224e9e3 100644 --- a/src/frostfs_testlib/cli/cli_command.py +++ b/src/frostfs_testlib/cli/cli_command.py @@ -1,10 +1,11 @@ from typing import Optional from frostfs_testlib.shell import CommandOptions, CommandResult, InteractiveInput, Shell +from frostfs_testlib.utils.datetime_utils import parse_time class CliCommand: - + TIMEOUT_INACCURACY = 10 WALLET_SOURCE_ERROR_MSG = "Provide either wallet or wallet_config to specify wallet location" WALLET_PASSWD_ERROR_MSG = "Provide either wallet_password or wallet_config to specify password" @@ -59,14 +60,18 @@ class CliCommand: return f"{self.cli_exec_path} {self.__base_params} {command or ''} {param_str}" def _execute(self, command: Optional[str], **params) -> CommandResult: - timeout = int(params["timeout"].rstrip("s")) if params.get("timeout") else None + if timeout := params.get("timeout"): + timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY + return self.shell.exec( self._format_command(command, **params), CommandOptions(timeout=timeout), ) def 
_execute_with_password(self, command: Optional[str], password, **params) -> CommandResult: - timeout = int(params["timeout"].rstrip("s")) if params.get("timeout") else None + if timeout := params.get("timeout"): + timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY + return self.shell.exec( self._format_command(command, **params), CommandOptions( From 3966f65c95cbad9f5adc99d9c396178008409c37 Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Mon, 17 Mar 2025 16:24:36 +0300 Subject: [PATCH 70/81] [#364] Fixed hook order tests collection Signed-off-by: Ilyas Niyazov --- src/frostfs_testlib/hooks.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py index 1ada660..c56c75a 100644 --- a/src/frostfs_testlib/hooks.py +++ b/src/frostfs_testlib/hooks.py @@ -16,6 +16,9 @@ def pytest_add_frostfs_marker(items: list[pytest.Item]): # pytest hook. Do not rename @pytest.hookimpl(trylast=True) def pytest_collection_modifyitems(items: list[pytest.Item]): + # The order of running tests corresponded to the suites + items.sort(key=lambda item: item.nodeid) + # Change order of tests based on @pytest.mark.order() marker def order(item: pytest.Item) -> int: order_marker = item.get_closest_marker("order") From dcde9e15b104602f117e6ed352f30726601d8545 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Thu, 13 Mar 2025 16:53:42 +0300 Subject: [PATCH 71/81] [#365] Change type hint for `NetmapOperations.nodeinfo` Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/cli/netmap_parser.py | 2 +- .../storage/grpc_operations/implementations/netmap.py | 4 ++-- .../storage/grpc_operations/interfaces/netmap.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py index 2c97c3a..4b4a501 100644 --- a/src/frostfs_testlib/cli/netmap_parser.py +++ b/src/frostfs_testlib/cli/netmap_parser.py @@ -89,7 +89,7 @@ class NetmapParser: 
return snapshot @staticmethod - def node_info(output: dict) -> NodeNetmapInfo: + def node_info(output: dict) -> NodeInfo: data_dict = {"attributes": {}} for key, value in output.items(): diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py index 905171b..76ee69a 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py @@ -4,7 +4,7 @@ from typing import List, Optional from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.cli.netmap_parser import NetmapParser from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeInfo, NodeNetInfo, NodeNetmapInfo from .. import interfaces @@ -86,7 +86,7 @@ class NetmapOperations(interfaces.NetmapInterface): trace: Optional[bool] = True, xhdr: Optional[dict] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> NodeNetmapInfo: + ) -> NodeInfo: """ Get target node info. 
""" diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py index 3f0a341..3fdc98a 100644 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py @@ -1,7 +1,7 @@ from abc import ABC, abstractmethod from typing import List, Optional -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeInfo, NodeNetInfo, NodeNetmapInfo class NetmapInterface(ABC): @@ -50,7 +50,7 @@ class NetmapInterface(ABC): ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, - ) -> NodeNetmapInfo: + ) -> NodeInfo: """ Get target node info. """ From 91a2706b06f2bb5d00f0ef60ef5bf1e2c55ece3a Mon Sep 17 00:00:00 2001 From: anurindm Date: Wed, 19 Mar 2025 11:43:21 +0300 Subject: [PATCH 72/81] [#366] Test order depends on location Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py index c56c75a..d7e4cc8 100644 --- a/src/frostfs_testlib/hooks.py +++ b/src/frostfs_testlib/hooks.py @@ -17,7 +17,7 @@ def pytest_add_frostfs_marker(items: list[pytest.Item]): @pytest.hookimpl(trylast=True) def pytest_collection_modifyitems(items: list[pytest.Item]): # The order of running tests corresponded to the suites - items.sort(key=lambda item: item.nodeid) + items.sort(key=lambda item: item.location[0]) # Change order of tests based on @pytest.mark.order() marker def order(item: pytest.Item) -> int: From 8bedd9b3d6d57b493a93888f35177e58eb35fb0d Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 19 Mar 2025 14:33:25 +0300 Subject: [PATCH 73/81] [#367] Use full date during log Signed-off-by: a.berezin --- src/frostfs_testlib/shell/local_shell.py | 2 +- 
src/frostfs_testlib/shell/ssh_shell.py | 23 +++++++---------------- src/frostfs_testlib/utils/cli_utils.py | 2 +- 3 files changed, 9 insertions(+), 18 deletions(-) diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index 746070f..c0f3b06 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -141,6 +141,6 @@ class LocalShell(Shell): f"RETCODE: {result.return_code}\n\n" f"STDOUT:\n{result.stdout}\n" f"STDERR:\n{result.stderr}\n" - f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" + f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}" ) reporter.attach(command_attachment, "Command execution.txt") diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index e718b4d..3f13dca 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -68,8 +68,7 @@ class SshConnectionProvider: try: if creds.ssh_key_path: logger.info( - f"Trying to connect to host {host} as {creds.ssh_login} using SSH key " - f"{creds.ssh_key_path} (attempt {attempt})" + f"Trying to connect to host {host} as {creds.ssh_login} using SSH key " f"{creds.ssh_key_path} (attempt {attempt})" ) connection.connect( hostname=host, @@ -79,9 +78,7 @@ class SshConnectionProvider: timeout=self.CONNECTION_TIMEOUT, ) else: - logger.info( - f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})" - ) + logger.info(f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})") connection.connect( hostname=host, port=port, @@ -104,9 +101,7 @@ class SshConnectionProvider: connection.close() can_retry = attempt + 1 < self.SSH_CONNECTION_ATTEMPTS if can_retry: - logger.warn( - f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. 
Error: {exc}" - ) + logger.warn(f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}") sleep(self.SSH_ATTEMPTS_INTERVAL) continue logger.exception(f"Can't connect to host {host}") @@ -139,7 +134,7 @@ def log_command(func): f"RC:\n {result.return_code}\n" f"STDOUT:\n{textwrap.indent(result.stdout, ' ')}\n" f"STDERR:\n{textwrap.indent(result.stderr, ' ')}\n" - f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" + f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}" ) if not options.no_log: @@ -185,13 +180,11 @@ class SSHShell(Shell): private_key_passphrase: Optional[str] = None, port: str = "22", command_inspectors: Optional[list[CommandInspector]] = None, - custom_environment: Optional[dict] = None + custom_environment: Optional[dict] = None, ) -> None: super().__init__() self.connection_provider = SshConnectionProvider() - self.connection_provider.store_creds( - host, SshCredentials(login, password, private_key_path, private_key_passphrase) - ) + self.connection_provider.store_creds(host, SshCredentials(login, password, private_key_path, private_key_passphrase)) self.host = host self.port = port @@ -220,9 +213,7 @@ class SSHShell(Shell): result = self._exec_non_interactive(command, options) if options.check and result.return_code != 0: - raise RuntimeError( - f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n" - ) + raise RuntimeError(f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n") return result @log_command diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 0f9fef2..8787296 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -68,7 +68,7 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n" 
f"RC: {return_code}\n" - f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {end_time - start_time}" + f"Start / End / Elapsed\t {start_time} / {end_time} / {end_time - start_time}" ) with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): reporter.attach(command_attachment, "Command execution") From 6bbc359ec9e653f74aa92346d0ee971e944af3cd Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Thu, 20 Mar 2025 09:05:50 +0300 Subject: [PATCH 74/81] [#368] Fixed function check metrics Signed-off-by: Ilyas Niyazov --- src/frostfs_testlib/steps/metrics.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/steps/metrics.py b/src/frostfs_testlib/steps/metrics.py index a9e545a..0d0950a 100644 --- a/src/frostfs_testlib/steps/metrics.py +++ b/src/frostfs_testlib/steps/metrics.py @@ -6,7 +6,7 @@ from frostfs_testlib.testing.test_control import wait_for_success @reporter.step("Check metrics result") -@wait_for_success(interval=10) +@wait_for_success(max_wait_time=300, interval=10) def check_metrics_counter( cluster_nodes: list[ClusterNode], operator: str = "==", @@ -19,7 +19,7 @@ def check_metrics_counter( counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps) assert eval( f"{counter_act} {operator} {counter_exp}" - ), f"Expected: {counter_exp} {operator} Actual: {counter_act} in nodes: {cluster_nodes}" + ), f"Actual: {counter_act} {operator} Expected: {counter_exp} in nodes: {cluster_nodes}" @reporter.step("Get metrics value from node: {node}") From c8eec119062001768568d1d0da3e93f7d761dfb8 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Thu, 20 Mar 2025 17:11:45 +0300 Subject: [PATCH 75/81] [#369] Set region in S3 STS client Signed-off-by: Yaroslava Lukoyanova --- src/frostfs_testlib/clients/s3/boto3_client.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index 
4157bd6..bceecdf 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -90,6 +90,7 @@ class Boto3ClientWrapper(S3ClientWrapper): aws_access_key_id=self.access_key_id, aws_secret_access_key=self.secret_access_key, endpoint_url=iam_endpoint, + region_name=self.region, verify=False, ) From c4ab14fce8acf26907132f91f0b3566edc853bf7 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 21 Mar 2025 20:03:06 +0300 Subject: [PATCH 76/81] [#370] Unify `delete_object_tagging` method in S3 clients Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/clients/s3/boto3_client.py | 2 +- src/frostfs_testlib/clients/s3/interfaces.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index bceecdf..dd13e6f 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -770,7 +770,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response.get("TagSet") @reporter.step("Delete object tagging") - def delete_object_tagging(self, bucket: str, key: str) -> None: + def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: params = self._convert_to_s3_params(locals()) self._exec_request( self.boto3_client.delete_object_tagging, diff --git a/src/frostfs_testlib/clients/s3/interfaces.py b/src/frostfs_testlib/clients/s3/interfaces.py index d636182..b35d3bf 100644 --- a/src/frostfs_testlib/clients/s3/interfaces.py +++ b/src/frostfs_testlib/clients/s3/interfaces.py @@ -377,7 +377,7 @@ class S3ClientWrapper(HumanReadableABC): """Returns the tag-set of an object.""" @abstractmethod - def delete_object_tagging(self, bucket: str, key: str) -> None: + def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: """Removes the entire tag set from the specified object.""" @abstractmethod 
From d38808a1f55e370d43e868e7551127dea6506998 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Mon, 3 Feb 2025 12:44:21 +0300 Subject: [PATCH 77/81] [#354] Support of presigned url methods for S3 Signed-off-by: Yaroslava Lukoyanova --- .../clients/s3/aws_cli_client.py | 9 +++++++++ src/frostfs_testlib/clients/s3/boto3_client.py | 18 +++++++++++++++++- src/frostfs_testlib/clients/s3/interfaces.py | 4 ++++ src/frostfs_testlib/steps/http_gate.py | 4 ++++ 4 files changed, 34 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py index a2e3fc7..c1dd6b6 100644 --- a/src/frostfs_testlib/clients/s3/aws_cli_client.py +++ b/src/frostfs_testlib/clients/s3/aws_cli_client.py @@ -959,6 +959,15 @@ class AwsCliClient(S3ClientWrapper): return json_output + @reporter.step("Create presign url for the object") + def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: + # AWS CLI does not support method definition and world only in 'get_object' state by default + cmd = f"aws {self.common_flags} s3 presign s3://{bucket}/{key} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + if expires_in: + cmd += f" --expires-in {expires_in}" + response = self.local_shell.exec(cmd).stdout + return response.strip() + # IAM METHODS # # Some methods don't have checks because AWS is silent in some cases (delete, attach, etc.) 
diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index dd13e6f..0c4e8e4 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -48,7 +48,13 @@ class Boto3ClientWrapper(S3ClientWrapper): self.region = region self.session = boto3.Session() - self.config = Config(retries={"max_attempts": MAX_REQUEST_ATTEMPTS, "mode": RETRY_MODE}) + self.config = Config( + signature_version="s3v4", + retries={ + "max_attempts": MAX_REQUEST_ATTEMPTS, + "mode": RETRY_MODE, + }, + ) self.set_endpoint(s3gate_endpoint) @@ -813,6 +819,16 @@ class Boto3ClientWrapper(S3ClientWrapper): ) -> dict: raise NotImplementedError("Cp is not supported for boto3 client") + @reporter.step("Create presign url for the object") + def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: + response = self._exec_request( + method=self.boto3_client.generate_presigned_url, + params={"ClientMethod": method, "Params": {"Bucket": bucket, "Key": key}, "ExpiresIn": expires_in}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response + # END OBJECT METHODS # # IAM METHODS # diff --git a/src/frostfs_testlib/clients/s3/interfaces.py b/src/frostfs_testlib/clients/s3/interfaces.py index b35d3bf..0d03a28 100644 --- a/src/frostfs_testlib/clients/s3/interfaces.py +++ b/src/frostfs_testlib/clients/s3/interfaces.py @@ -425,6 +425,10 @@ class S3ClientWrapper(HumanReadableABC): ) -> dict: """cp directory TODO: Add proper description""" + @abstractmethod + def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: + """Creates presign URL""" + # END OF OBJECT METHODS # # IAM METHODS # diff --git a/src/frostfs_testlib/steps/http_gate.py b/src/frostfs_testlib/steps/http_gate.py index 51b0301..aa4abf2 100644 --- a/src/frostfs_testlib/steps/http_gate.py +++ b/src/frostfs_testlib/steps/http_gate.py 
@@ -33,6 +33,7 @@ def get_via_http_gate( oid: str, node: ClusterNode, request_path: Optional[str] = None, + presigned_url: Optional[str] = None, timeout: Optional[int] = 300, ): """ @@ -47,6 +48,9 @@ def get_via_http_gate( if request_path: request = f"{node.http_gate.get_endpoint()}{request_path}" + if presigned_url: + request = presigned_url + response = requests.get(request, stream=True, timeout=timeout, verify=False) if not response.ok: From 80226ee0a8c2e309394bc7de13f0dba794e4fad6 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Thu, 27 Mar 2025 15:25:24 +0300 Subject: [PATCH 78/81] [#371] Add IAM and STS clients to boto3-stubs Signed-off-by: Kirill Sosnovskikh --- pyproject.toml | 2 +- requirements.txt | 4 ++-- src/frostfs_testlib/clients/s3/boto3_client.py | 6 ++++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2778f8a..d62f04b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,7 @@ dependencies = [ "pytest==7.1.2", "tenacity==8.0.1", "boto3==1.35.30", - "boto3-stubs[essential]==1.35.30", + "boto3-stubs[s3,iam,sts]==1.35.30", ] requires-python = ">=3.10" diff --git a/requirements.txt b/requirements.txt index a0bcc11..56d9b83 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ testrail-api==1.12.0 tenacity==8.0.1 pytest==7.1.2 boto3==1.35.30 -boto3-stubs[essential]==1.35.30 +boto3-stubs[s3,iam,sts]==1.35.30 pydantic==2.10.6 # Dev dependencies @@ -22,4 +22,4 @@ pylint==2.17.4 # Packaging dependencies build==0.8.0 setuptools==65.3.0 -twine==4.0.1 +twine==4.0.1 \ No newline at end of file diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index 0c4e8e4..ac4d55b 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -10,7 +10,9 @@ import boto3 import urllib3 from botocore.config import Config from botocore.exceptions import ClientError +from mypy_boto3_iam 
import IAMClient from mypy_boto3_s3 import S3Client +from mypy_boto3_sts import STSClient from frostfs_testlib import reporter from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict @@ -39,8 +41,8 @@ class Boto3ClientWrapper(S3ClientWrapper): self.boto3_client: S3Client = None self.iam_endpoint: str = "" - self.boto3_iam_client: S3Client = None - self.boto3_sts_client: S3Client = None + self.boto3_iam_client: IAMClient = None + self.boto3_sts_client: STSClient = None self.access_key_id = access_key_id self.secret_access_key = secret_access_key From aab4d4f657590dcb1be0231b477862192d51c33c Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 15 Apr 2025 12:26:35 +0300 Subject: [PATCH 79/81] [#373] Add step to httpClient for log write Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/clients/http/http_client.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/clients/http/http_client.py b/src/frostfs_testlib/clients/http/http_client.py index aebd5ef..16d7707 100644 --- a/src/frostfs_testlib/clients/http/http_client.py +++ b/src/frostfs_testlib/clients/http/http_client.py @@ -15,14 +15,14 @@ LOGGING_CONFIG = { "handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}}, "formatters": { "http": { - "format": "%(levelname)s [%(asctime)s] %(name)s - %(message)s", + "format": "%(asctime)s [%(levelname)s] %(name)s - %(message)s", "datefmt": "%Y-%m-%d %H:%M:%S", } }, "loggers": { "httpx": { "handlers": ["default"], - "level": "DEBUG", + "level": "ERROR", }, "httpcore": { "handlers": ["default"], @@ -43,7 +43,7 @@ class HttpClient: response = client.request(method, url, **kwargs) self._attach_response(response, **kwargs) - logger.info(f"Response: {response.status_code} => {response.text}") + # logger.info(f"Response: {response.status_code} => {response.text}") if expected_status_code: assert ( @@ -131,6 +131,7 @@ class 
HttpClient: reporter.attach(report, "Requests Info") reporter.attach(curl_request, "CURL") + cls._write_log(curl_request, response_body, response.status_code) @classmethod def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str: @@ -143,3 +144,9 @@ class HttpClient: # Option -k means no verify SSL return f"curl {url} -X {method} {headers}{data} -k" + + @classmethod + def _write_log(cls, curl: str, res_body: str, res_code: int) -> None: + if res_body: + curl += f"\nResponse: {res_code}\n{res_body}" + logger.info(f"{curl}") From 9ad620121e3871f9eab4e5afd3495197541a90a9 Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Wed, 9 Apr 2025 16:15:46 +0300 Subject: [PATCH 80/81] [#372] Added decorator wait until stabilization metric values Signed-off-by: Ilyas Niyazov --- .../storage/dataclasses/metrics.py | 48 ++++++++++++++++++- 1 file changed, 46 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/storage/dataclasses/metrics.py b/src/frostfs_testlib/storage/dataclasses/metrics.py index 81e757c..8969015 100644 --- a/src/frostfs_testlib/storage/dataclasses/metrics.py +++ b/src/frostfs_testlib/storage/dataclasses/metrics.py @@ -1,3 +1,9 @@ +import time +from functools import wraps +from typing import Callable + +import pytest + from frostfs_testlib.hosting import Host from frostfs_testlib.shell.interfaces import CommandResult @@ -7,11 +13,11 @@ class Metrics: self.storage = StorageMetrics(host, metrics_endpoint) - class StorageMetrics: """ Class represents storage metrics in a cluster """ + def __init__(self, host: Host, metrics_endpoint: str) -> None: self.host = host self.metrics_endpoint = metrics_endpoint @@ -29,8 +35,46 @@ class StorageMetrics: additional_greps = " |grep ".join([grep_command for grep_command in greps.values()]) result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}") return result - + def get_all_metrics(self) -> CommandResult: shell = self.host.get_shell() result = 
shell.exec(f"curl -s {self.metrics_endpoint}") return result + + +def wait_until_metric_result_is_stable( + relative_deviation: float = None, absolute_deviation: int = None, max_attempts: int = 10, sleep_interval: int = 30 +): + """ + A decorator function that repeatedly calls the decorated function until its result stabilizes + within a specified relative tolerance or until the maximum number of attempts is reached. + + This decorator is useful for scenarios where a function returns a metric or value that may fluctuate + over time, and you want to ensure that the result has stabilized before proceeding. + """ + + def decorator(func: Callable): + @wraps(func) + def wrapper(*args, **kwargs): + last_result = None + for _ in range(max_attempts): + # first function call + first_result = func(*args, **kwargs) + + # waiting before the second call + time.sleep(sleep_interval) + + # second function call + last_result = func(*args, **kwargs) + + # checking value stability + if first_result == pytest.approx(last_result, rel=relative_deviation, abs=absolute_deviation): + return last_result + + # if stability is not achieved, return the last value + if last_result is not None: + return last_result + + return wrapper + + return decorator From 517a7b932261a142b2a86b2687843d8fc9651ce0 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Mon, 28 Apr 2025 18:43:44 +0300 Subject: [PATCH 81/81] [#377] Update text for "subject not found" error Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/resources/error_patterns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 6c0cb14..15e2977 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -1,6 +1,6 @@ # Regex patterns of status codes of Container service CONTAINER_NOT_FOUND = "code = 3072.*message = container not found" -SUBJECT_NOT_FOUND = 
"code = 1024.*message = frostfs error: chain/client.*subject not found.*" +SUBJECT_NOT_FOUND = "code = 1024.*message =.*chain/client.*subject not found.*" # Regex patterns of status codes of Object service MALFORMED_REQUEST = "code = 1024.*message = malformed request"