diff --git a/CODEOWNERS b/CODEOWNERS
index 519ca42..4a621d3 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -1,3 +1 @@
-.* @TrueCloudLab/qa-committers
-.forgejo/.* @potyarkin
-Makefile @potyarkin
+* @JuliaKovshova @abereziny @d.zayakin @anikeev-yadro @anurindm @ylukoyan @i.niyazov
diff --git a/pyproject.toml b/pyproject.toml
index d62f04b..2778f8a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -28,7 +28,7 @@ dependencies = [
     "pytest==7.1.2",
     "tenacity==8.0.1",
     "boto3==1.35.30",
-    "boto3-stubs[s3,iam,sts]==1.35.30",
+    "boto3-stubs[essential]==1.35.30",
 ]
 
 requires-python = ">=3.10"
diff --git a/requirements.txt b/requirements.txt
index 56d9b83..e012366 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,8 +9,7 @@ testrail-api==1.12.0
 tenacity==8.0.1
 pytest==7.1.2
 boto3==1.35.30
-boto3-stubs[s3,iam,sts]==1.35.30
-pydantic==2.10.6
+boto3-stubs[essential]==1.35.30
 
 # Dev dependencies
 black==22.8.0
@@ -22,4 +21,4 @@ pylint==2.17.4
 # Packaging dependencies
 build==0.8.0
 setuptools==65.3.0
-twine==4.0.1
\ No newline at end of file
+twine==4.0.1
diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py
index 4724a8b..1ceb972 100644
--- a/src/frostfs_testlib/__init__.py
+++ b/src/frostfs_testlib/__init__.py
@@ -1,4 +1,4 @@
 __version__ = "2.0.1"
 
-from .fixtures import configure_testlib, hosting, session_start_time, temp_directory
-from .hooks import pytest_add_frostfs_marker, pytest_collection_modifyitems
+from .fixtures import configure_testlib, hosting, temp_directory
+from .hooks import pytest_collection_modifyitems
diff --git a/src/frostfs_testlib/cli/cli_command.py b/src/frostfs_testlib/cli/cli_command.py
index 224e9e3..3600e77 100644
--- a/src/frostfs_testlib/cli/cli_command.py
+++ b/src/frostfs_testlib/cli/cli_command.py
@@ -1,11 +1,10 @@
 from typing import Optional
 
 from frostfs_testlib.shell import CommandOptions, CommandResult, InteractiveInput, Shell
-from frostfs_testlib.utils.datetime_utils import parse_time
 
 
 class CliCommand:
-    TIMEOUT_INACCURACY = 10
+
     WALLET_SOURCE_ERROR_MSG = "Provide either wallet or wallet_config to specify wallet location"
     WALLET_PASSWD_ERROR_MSG = "Provide either wallet_password or wallet_config to specify password"
 
@@ -25,7 +24,9 @@ class CliCommand:
     def __init__(self, shell: Shell, cli_exec_path: str, **base_params):
         self.shell = shell
         self.cli_exec_path = cli_exec_path
-        self.__base_params = " ".join([f"--{param} {value}" for param, value in base_params.items() if value])
+        self.__base_params = " ".join(
+            [f"--{param} {value}" for param, value in base_params.items() if value]
+        )
 
     def _format_command(self, command: str, **params) -> str:
         param_str = []
@@ -47,7 +48,9 @@ class CliCommand:
                     val_str = str(value_item).replace("'", "\\'")
                     param_str.append(f"--{param} '{val_str}'")
             elif isinstance(value, dict):
-                param_str.append(f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'')
+                param_str.append(
+                    f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\''
+                )
             else:
                 if "'" in str(value):
                     value_str = str(value).replace('"', '\\"')
@@ -60,22 +63,12 @@ class CliCommand:
         return f"{self.cli_exec_path} {self.__base_params} {command or ''} {param_str}"
 
     def _execute(self, command: Optional[str], **params) -> CommandResult:
-        if timeout := params.get("timeout"):
-            timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY
-
-        return self.shell.exec(
-            self._format_command(command, **params),
-            CommandOptions(timeout=timeout),
-        )
+        return self.shell.exec(self._format_command(command, **params))
 
     def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult:
-        if timeout := params.get("timeout"):
-            timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY
-
         return self.shell.exec(
             self._format_command(command, **params),
-            CommandOptions(
-                interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)],
-                timeout=timeout,
+            options=CommandOptions(
+                interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)]
             ),
         )
diff --git a/src/frostfs_testlib/cli/frostfs_cli/cli.py b/src/frostfs_testlib/cli/frostfs_cli/cli.py
index 7874f18..d83b7ae 100644
--- a/src/frostfs_testlib/cli/frostfs_cli/cli.py
+++ b/src/frostfs_testlib/cli/frostfs_cli/cli.py
@@ -29,7 +29,6 @@ class FrostfsCli:
     util: FrostfsCliUtil
     version: FrostfsCliVersion
     control: FrostfsCliControl
-    ape_manager: FrostfsCliApeManager
 
     def __init__(self, shell: Shell, frostfs_cli_exec_path: str, config_file: Optional[str] = None):
         self.accounting = FrostfsCliAccounting(shell, frostfs_cli_exec_path, config=config_file)
diff --git a/src/frostfs_testlib/cli/frostfs_cli/netmap.py b/src/frostfs_testlib/cli/frostfs_cli/netmap.py
index cd197d3..d219940 100644
--- a/src/frostfs_testlib/cli/frostfs_cli/netmap.py
+++ b/src/frostfs_testlib/cli/frostfs_cli/netmap.py
@@ -12,7 +12,6 @@ class FrostfsCliNetmap(CliCommand):
         address: Optional[str] = None,
         generate_key: bool = False,
         ttl: Optional[int] = None,
-        trace: Optional[bool] = False,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
     ) -> CommandResult:
@@ -43,7 +42,6 @@ class FrostfsCliNetmap(CliCommand):
         address: Optional[str] = None,
         generate_key: bool = False,
         ttl: Optional[int] = None,
-        trace: Optional[bool] = False,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
     ) -> CommandResult:
@@ -75,7 +73,6 @@ class FrostfsCliNetmap(CliCommand):
         generate_key: bool = False,
         json: bool = False,
         ttl: Optional[int] = None,
-        trace: Optional[bool] = False,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
     ) -> CommandResult:
@@ -107,7 +104,6 @@ class FrostfsCliNetmap(CliCommand):
         address: Optional[str] = None,
         generate_key: bool = False,
         ttl: Optional[int] = None,
-        trace: Optional[bool] = False,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
     ) -> CommandResult:
diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py
index 4b4a501..23ac4da 100644
--- a/src/frostfs_testlib/cli/netmap_parser.py
+++ b/src/frostfs_testlib/cli/netmap_parser.py
@@ -1,7 +1,7 @@
 import re
 
 from frostfs_testlib.storage.cluster import ClusterNode
-from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeInfo, NodeNetInfo, NodeNetmapInfo, NodeStatus
+from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo, NodeStatus
 
 
 class NetmapParser:
@@ -20,6 +20,8 @@
             "withdrawal_fee": r"Withdrawal fee: (?P<withdrawal_fee>\d+)",
             "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?P<homomorphic_hashing_disabled>true|false)",
             "maintenance_mode_allowed": r"Maintenance mode allowed: (?P<maintenance_mode_allowed>true|false)",
+            "eigen_trust_alpha": r"EigenTrustAlpha: (?P<eigen_trust_alpha>\d+\w+$)",
+            "eigen_trust_iterations": r"EigenTrustIterations: (?P<eigen_trust_iterations>\d+)",
         }
 
         parse_result = {}
@@ -62,7 +64,7 @@
         for node in netmap_nodes:
             for key, regex in regexes.items():
                 search_result = re.search(regex, node, flags=re.MULTILINE)
-                if search_result is None:
+                if search_result == None:
                     result_netmap[key] = None
                     continue
                 if key == "node_data_ips":
@@ -81,22 +83,9 @@
         return
dataclasses_netmap @staticmethod - def snapshot_one_node(output: str, rpc_endpoint: str) -> NodeNetmapInfo | None: + def snapshot_one_node(output: str, cluster_node: ClusterNode) -> NodeNetmapInfo | None: snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output) - for snapshot in snapshot_nodes: - for endpoint in snapshot.external_address: - if rpc_endpoint.split(":")[0] in endpoint: - return snapshot - - @staticmethod - def node_info(output: dict) -> NodeInfo: - data_dict = {"attributes": {}} - - for key, value in output.items(): - if key != "attributes": - data_dict[key] = value - - for attribute in output["attributes"]: - data_dict["attributes"][attribute["key"]] = attribute["value"] - - return NodeInfo(**data_dict) + snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.host_ip] + if not snapshot_node: + return None + return snapshot_node[0] diff --git a/src/frostfs_testlib/clients/http/__init__.py b/src/frostfs_testlib/clients/http/__init__.py index ab6e2b0..e69de29 100644 --- a/src/frostfs_testlib/clients/http/__init__.py +++ b/src/frostfs_testlib/clients/http/__init__.py @@ -1 +0,0 @@ -from frostfs_testlib.clients.http.http_client import HttpClient diff --git a/src/frostfs_testlib/clients/http/http_client.py b/src/frostfs_testlib/clients/http/http_client.py index 16d7707..c3e5fae 100644 --- a/src/frostfs_testlib/clients/http/http_client.py +++ b/src/frostfs_testlib/clients/http/http_client.py @@ -15,14 +15,14 @@ LOGGING_CONFIG = { "handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}}, "formatters": { "http": { - "format": "%(asctime)s [%(levelname)s] %(name)s - %(message)s", + "format": "%(levelname)s [%(asctime)s] %(name)s - %(message)s", "datefmt": "%Y-%m-%d %H:%M:%S", } }, "loggers": { "httpx": { "handlers": ["default"], - "level": "ERROR", + "level": "DEBUG", }, "httpcore": { "handlers": ["default"], @@ -43,7 +43,7 @@ class HttpClient: response = client.request(method, url, **kwargs) self._attach_response(response, **kwargs) - # logger.info(f"Response: {response.status_code} => {response.text}") + logger.info(f"Response: {response.status_code} => {response.text}") if expected_status_code: assert ( @@ -131,22 +131,14 @@ class HttpClient: reporter.attach(report, "Requests Info") reporter.attach(curl_request, "CURL") - cls._write_log(curl_request, response_body, response.status_code) @classmethod def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str: - excluded_headers = {"Accept-Encoding", "Connection", "User-Agent", "Content-Length"} - headers = " ".join(f"-H '{header.title()}: {value}'" for header, value in headers.items() if header.title() not in excluded_headers) - + headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items()) data = f" -d '{data}'" if data else "" + for name, path in files.items(): data += f' -F "{name}=@{path}"' # Option -k means no verify SSL return f"curl {url} -X {method} {headers}{data} -k" - - @classmethod - def _write_log(cls, curl: str, res_body: str, res_code: int) -> None: - if res_body: - curl += f"\nResponse: {res_code}\n{res_body}" - logger.info(f"{curl}") diff --git a/src/frostfs_testlib/clients/s3/__init__.py b/src/frostfs_testlib/clients/s3/__init__.py index 5481f48..65a3990 100644 --- a/src/frostfs_testlib/clients/s3/__init__.py +++ b/src/frostfs_testlib/clients/s3/__init__.py @@ -1,3 +1 @@ -from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient -from 
frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper -from frostfs_testlib.clients.s3.interfaces import ACL, BucketContainerResolver, S3ClientWrapper, VersioningStatus +from frostfs_testlib.clients.s3.interfaces import BucketContainerResolver, S3ClientWrapper, VersioningStatus diff --git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py index c1dd6b6..3496b2b 100644 --- a/src/frostfs_testlib/clients/s3/aws_cli_client.py +++ b/src/frostfs_testlib/clients/s3/aws_cli_client.py @@ -33,14 +33,12 @@ class AwsCliClient(S3ClientWrapper): self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" ) -> None: self.s3gate_endpoint = s3gate_endpoint - self.iam_endpoint = None - self.access_key_id: str = access_key_id self.secret_access_key: str = secret_access_key self.profile = profile - self.region = region - self.local_shell = LocalShell() + self.region = region + self.iam_endpoint = None try: _configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key, region) self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}") @@ -959,15 +957,6 @@ class AwsCliClient(S3ClientWrapper): return json_output - @reporter.step("Create presign url for the object") - def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: - # AWS CLI does not support method definition and world only in 'get_object' state by default - cmd = f"aws {self.common_flags} s3 presign s3://{bucket}/{key} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - if expires_in: - cmd += f" --expires-in {expires_in}" - response = self.local_shell.exec(cmd).stdout - return response.strip() - # IAM METHODS # # Some methods don't have checks because AWS is silent in some cases (delete, attach, etc.) 
@@ -988,7 +977,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @@ -999,7 +988,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @@ -1131,7 +1120,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @@ -1142,7 +1131,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @@ -1238,7 +1227,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" + assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" return response @@ -1250,7 +1239,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" + assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" return response @@ -1275,7 +1264,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" + assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" return response @@ -1287,7 +1276,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" + assert response.get("Groups"), f"Expected Groups in response:\n{response}" return response @@ -1299,7 +1288,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" + assert response.get("Groups"), f"Expected Groups in response:\n{response}" return response @@ -1335,7 +1324,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" + assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" return response @@ -1361,7 +1350,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @@ -1376,7 +1365,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py 
index ac4d55b..53e7ffa 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -10,9 +10,7 @@ import boto3 import urllib3 from botocore.config import Config from botocore.exceptions import ClientError -from mypy_boto3_iam import IAMClient from mypy_boto3_s3 import S3Client -from mypy_boto3_sts import STSClient from frostfs_testlib import reporter from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict @@ -37,25 +35,25 @@ class Boto3ClientWrapper(S3ClientWrapper): def __init__( self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" ) -> None: - self.s3gate_endpoint: str = "" self.boto3_client: S3Client = None + self.s3gate_endpoint: str = "" + self.boto3_iam_client: S3Client = None self.iam_endpoint: str = "" - self.boto3_iam_client: IAMClient = None - self.boto3_sts_client: STSClient = None - self.access_key_id = access_key_id - self.secret_access_key = secret_access_key + self.boto3_sts_client: S3Client = None + + self.access_key_id: str = access_key_id + self.secret_access_key: str = secret_access_key self.profile = profile self.region = region self.session = boto3.Session() self.config = Config( - signature_version="s3v4", retries={ "max_attempts": MAX_REQUEST_ATTEMPTS, "mode": RETRY_MODE, - }, + } ) self.set_endpoint(s3gate_endpoint) @@ -92,13 +90,12 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint_url=self.iam_endpoint, verify=False, ) - # since the STS does not have an endpoint, IAM is used + # since the STS does not have an enpoint, IAM is used self.boto3_sts_client = self.session.client( service_name="sts", aws_access_key_id=self.access_key_id, aws_secret_access_key=self.secret_access_key, endpoint_url=iam_endpoint, - region_name=self.region, verify=False, ) @@ -148,7 +145,6 @@ class Boto3ClientWrapper(S3ClientWrapper): params = {"Bucket": bucket} if object_lock_enabled_for_bucket is not None: params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket}) - if acl is not None: params.update({"ACL": acl}) elif grant_write or grant_read or grant_full_control: @@ -158,7 +154,6 @@ class Boto3ClientWrapper(S3ClientWrapper): params.update({"GrantRead": grant_read}) elif grant_full_control: params.update({"GrantFullControl": grant_full_control}) - if location_constraint: params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) @@ -778,7 +773,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response.get("TagSet") @reporter.step("Delete object tagging") - def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: + def delete_object_tagging(self, bucket: str, key: str) -> None: params = self._convert_to_s3_params(locals()) self._exec_request( self.boto3_client.delete_object_tagging, @@ -821,16 +816,6 @@ class Boto3ClientWrapper(S3ClientWrapper): ) -> dict: raise NotImplementedError("Cp is not supported for boto3 client") - @reporter.step("Create presign url for the object") - def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: - response = self._exec_request( - method=self.boto3_client.generate_presigned_url, - params={"ClientMethod": method, "Params": {"Bucket": bucket, "Key": key}, "ExpiresIn": expires_in}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response - # END OBJECT METHODS # # IAM METHODS # @@ -855,7 +840,7 @@ class 
Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Attaches the specified managed policy to the specified user") @@ -867,7 +852,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") @@ -998,7 +983,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Removes the specified managed policy from the specified user") @@ -1010,7 +995,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Returns a list of IAM users that are in the specified IAM group") @@ -1106,7 +1091,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" + assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" return response @reporter.step("Lists all managed policies that are attached to the specified IAM user") @@ -1117,7 +1102,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" + assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" return response @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") @@ -1142,7 +1127,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" + assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" return response @reporter.step("Lists the IAM groups") @@ -1152,7 +1137,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" + assert response.get("Groups"), f"Expected Groups in response:\n{response}" return response @reporter.step("Lists the IAM groups that the specified IAM user belongs to") @@ -1163,7 +1148,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" + assert response.get("Groups"), f"Expected Groups in response:\n{response}" return response @reporter.step("Lists all the managed policies that are available in your AWS account") @@ -1195,7 +1180,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" + assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" return response @reporter.step("Lists the IAM users") @@ -1220,7 +1205,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 14) + 
sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") @@ -1235,7 +1220,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Removes the specified user from the specified group") diff --git a/src/frostfs_testlib/clients/s3/interfaces.py b/src/frostfs_testlib/clients/s3/interfaces.py index 0d03a28..7ce9f31 100644 --- a/src/frostfs_testlib/clients/s3/interfaces.py +++ b/src/frostfs_testlib/clients/s3/interfaces.py @@ -22,15 +22,15 @@ class VersioningStatus(HumanReadableEnum): SUSPENDED = "Suspended" -class ACL: - PRIVATE = "private" - PUBLIC_READ = "public-read" - PUBLIC_READ_WRITE = "public-read-write" - AUTHENTICATED_READ = "authenticated-read" - AWS_EXEC_READ = "aws-exec-read" - BUCKET_OWNER_READ = "bucket-owner-read" - BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control" - LOG_DELIVERY_WRITE = "log-delivery-write" +ACL_COPY = [ + "private", + "public-read", + "public-read-write", + "authenticated-read", + "aws-exec-read", + "bucket-owner-read", + "bucket-owner-full-control", +] class BucketContainerResolver(ABC): @@ -50,14 +50,6 @@ class BucketContainerResolver(ABC): class S3ClientWrapper(HumanReadableABC): - access_key_id: str - secret_access_key: str - profile: str - region: str - - s3gate_endpoint: str - iam_endpoint: str - @abstractmethod def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None: pass @@ -377,7 +369,7 @@ class S3ClientWrapper(HumanReadableABC): """Returns the tag-set of an object.""" @abstractmethod - def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: + def delete_object_tagging(self, bucket: str, key: str) -> None: """Removes the entire tag set from the specified object.""" @abstractmethod @@ -425,10 +417,6 @@ class S3ClientWrapper(HumanReadableABC): ) -> dict: """cp directory TODO: Add proper description""" - @abstractmethod - def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: - """Creates presign URL""" - # END OF OBJECT METHODS # # IAM METHODS # diff --git a/src/frostfs_testlib/fixtures.py b/src/frostfs_testlib/fixtures.py index 7d767d2..d0f92f2 100644 --- a/src/frostfs_testlib/fixtures.py +++ b/src/frostfs_testlib/fixtures.py @@ -1,6 +1,5 @@ import logging import os -from datetime import datetime from importlib.metadata import entry_points import pytest @@ -12,12 +11,6 @@ from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE from frostfs_testlib.storage import get_service_registry -@pytest.fixture(scope="session", autouse=True) -def session_start_time(): - start_time = datetime.utcnow() - return start_time - - @pytest.fixture(scope="session") def configure_testlib(): reporter.get_reporter().register_handler(reporter.AllureHandler()) diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py index d7e4cc8..6830e78 100644 --- a/src/frostfs_testlib/hooks.py +++ b/src/frostfs_testlib/hooks.py @@ -1,8 +1,8 @@ import pytest -@pytest.hookimpl(specname="pytest_collection_modifyitems") -def pytest_add_frostfs_marker(items: list[pytest.Item]): +@pytest.hookimpl +def pytest_collection_modifyitems(items: list[pytest.Item]): # All tests which reside in frostfs nodeid are granted with frostfs marker, excluding # nodeid = full path of the test 
# 1. plugins @@ -11,21 +11,3 @@ def pytest_add_frostfs_marker(items: list[pytest.Item]): location = item.location[0] if "frostfs" in location and "plugin" not in location and "testlib" not in location: item.add_marker("frostfs") - - -# pytest hook. Do not rename -@pytest.hookimpl(trylast=True) -def pytest_collection_modifyitems(items: list[pytest.Item]): - # The order of running tests corresponded to the suites - items.sort(key=lambda item: item.location[0]) - - # Change order of tests based on @pytest.mark.order() marker - def order(item: pytest.Item) -> int: - order_marker = item.get_closest_marker("order") - if order_marker and (len(order_marker.args) != 1 or not isinstance(order_marker.args[0], int)): - raise RuntimeError("Incorrect usage of pytest.mark.order") - - order_value = order_marker.args[0] if order_marker else 0 - return order_value - - items.sort(key=lambda item: order(item)) diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index a41161c..f58d856 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -29,9 +29,6 @@ class Host(ABC): self._service_config_by_name = {service_config.name: service_config for service_config in config.services} self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis} - def __repr__(self) -> str: - return self.config.address - @property def config(self) -> HostConfig: """Returns config of the host. diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 3830203..15103e0 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -182,10 +182,8 @@ class Preset(MetaConfig): pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False) # Workers count for preset workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False) - # TODO: Deprecated. 
Acl for container/buckets + # Acl for container/buckets acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False) - # APE rule for containers instead of deprecated ACL - rule: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "rule", None, False, formatter=force_list) # ------ GRPC ------ # Amount of containers which should be created diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py index 071675a..5624940 100644 --- a/src/frostfs_testlib/processes/remote_process.py +++ b/src/frostfs_testlib/processes/remote_process.py @@ -193,7 +193,7 @@ class RemoteProcess: ) if "No such file or directory" in terminal.stderr: return None - elif terminal.return_code != 0: + elif terminal.stderr or terminal.return_code != 0: raise AssertionError(f"cat process {file} was not successful: {terminal.stderr}") return terminal.stdout diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 15e2977..4c22648 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -1,6 +1,5 @@ # Regex patterns of status codes of Container service CONTAINER_NOT_FOUND = "code = 3072.*message = container not found" -SUBJECT_NOT_FOUND = "code = 1024.*message =.*chain/client.*subject not found.*" # Regex patterns of status codes of Object service MALFORMED_REQUEST = "code = 1024.*message = malformed request" diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index c0f3b06..746070f 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -141,6 +141,6 @@ class LocalShell(Shell): f"RETCODE: {result.return_code}\n\n" f"STDOUT:\n{result.stdout}\n" f"STDERR:\n{result.stderr}\n" - f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}" + f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" ) reporter.attach(command_attachment, "Command execution.txt") diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index 3f13dca..e718b4d 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -68,7 +68,8 @@ class SshConnectionProvider: try: if creds.ssh_key_path: logger.info( - f"Trying to connect to host {host} as {creds.ssh_login} using SSH key " f"{creds.ssh_key_path} (attempt {attempt})" + f"Trying to connect to host {host} as {creds.ssh_login} using SSH key " + f"{creds.ssh_key_path} (attempt {attempt})" ) connection.connect( hostname=host, @@ -78,7 +79,9 @@ class SshConnectionProvider: timeout=self.CONNECTION_TIMEOUT, ) else: - logger.info(f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})") + logger.info( + f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})" + ) connection.connect( hostname=host, port=port, @@ -101,7 +104,9 @@ class SshConnectionProvider: connection.close() can_retry = attempt + 1 < self.SSH_CONNECTION_ATTEMPTS if can_retry: - logger.warn(f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}") + logger.warn( + f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. 
Error: {exc}" + ) sleep(self.SSH_ATTEMPTS_INTERVAL) continue logger.exception(f"Can't connect to host {host}") @@ -134,7 +139,7 @@ def log_command(func): f"RC:\n {result.return_code}\n" f"STDOUT:\n{textwrap.indent(result.stdout, ' ')}\n" f"STDERR:\n{textwrap.indent(result.stderr, ' ')}\n" - f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}" + f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" ) if not options.no_log: @@ -180,11 +185,13 @@ class SSHShell(Shell): private_key_passphrase: Optional[str] = None, port: str = "22", command_inspectors: Optional[list[CommandInspector]] = None, - custom_environment: Optional[dict] = None, + custom_environment: Optional[dict] = None ) -> None: super().__init__() self.connection_provider = SshConnectionProvider() - self.connection_provider.store_creds(host, SshCredentials(login, password, private_key_path, private_key_passphrase)) + self.connection_provider.store_creds( + host, SshCredentials(login, password, private_key_path, private_key_passphrase) + ) self.host = host self.port = port @@ -213,7 +220,9 @@ class SSHShell(Shell): result = self._exec_non_interactive(command, options) if options.check and result.return_code != 0: - raise RuntimeError(f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n") + raise RuntimeError( + f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n" + ) return result @log_command diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 7f8391d..f28de06 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -12,7 +12,6 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell import Shell from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import wait_for_success from frostfs_testlib.utils import json_utils @@ -753,10 +752,7 @@ def get_object_nodes( ] object_nodes = [ - cluster_node - for netmap_node in netmap_nodes - for cluster_node in cluster.cluster_nodes - if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT) + cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip ] return object_nodes diff --git a/src/frostfs_testlib/steps/http_gate.py b/src/frostfs_testlib/steps/http_gate.py index aa4abf2..51b0301 100644 --- a/src/frostfs_testlib/steps/http_gate.py +++ b/src/frostfs_testlib/steps/http_gate.py @@ -33,7 +33,6 @@ def get_via_http_gate( oid: str, node: ClusterNode, request_path: Optional[str] = None, - presigned_url: Optional[str] = None, timeout: Optional[int] = 300, ): """ @@ -48,9 +47,6 @@ def get_via_http_gate( if request_path: request = f"{node.http_gate.get_endpoint()}{request_path}" - if presigned_url: - request = presigned_url - response = requests.get(request, stream=True, timeout=timeout, verify=False) if not response.ok: diff --git a/src/frostfs_testlib/steps/metrics.py b/src/frostfs_testlib/steps/metrics.py index 0d0950a..a9e545a 100644 --- a/src/frostfs_testlib/steps/metrics.py +++ b/src/frostfs_testlib/steps/metrics.py @@ -6,7 +6,7 @@ from frostfs_testlib.testing.test_control 
import wait_for_success @reporter.step("Check metrics result") -@wait_for_success(max_wait_time=300, interval=10) +@wait_for_success(interval=10) def check_metrics_counter( cluster_nodes: list[ClusterNode], operator: str = "==", @@ -19,7 +19,7 @@ def check_metrics_counter( counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps) assert eval( f"{counter_act} {operator} {counter_exp}" - ), f"Actual: {counter_act} {operator} Expected: {counter_exp} in nodes: {cluster_nodes}" + ), f"Expected: {counter_exp} {operator} Actual: {counter_act} in nodes: {cluster_nodes}" @reporter.step("Get metrics value from node: {node}") diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 2e49208..39c6b66 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -5,7 +5,6 @@ class ConfigAttributes: WALLET_CONFIG = "wallet_config" CONFIG_DIR = "service_config_dir" CONFIG_PATH = "config_path" - WORKING_DIR = "working_dir" SHARD_CONFIG_PATH = "shard_config_path" LOGGER_CONFIG_PATH = "logger_config_path" LOCAL_WALLET_PATH = "local_wallet_path" @@ -16,7 +15,6 @@ class ConfigAttributes: ENDPOINT_DATA_0_NS = "endpoint_data0_namespace" ENDPOINT_INTERNAL = "endpoint_internal0" ENDPOINT_PROMETHEUS = "endpoint_prometheus" - ENDPOINT_PPROF = "endpoint_pprof" CONTROL_ENDPOINT = "control_endpoint" UN_LOCODE = "un_locode" diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 51aaefb..67e4d60 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,7 +1,7 @@ +import datetime import itertools import logging import time -from datetime import datetime, timezone from typing import TypeVar import frostfs_testlib.resources.optionals as optionals @@ -19,7 +19,7 @@ from frostfs_testlib.steps.node_management import include_node_to_network_map, r from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeStatus +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeStatus from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import parallel from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success @@ -247,20 +247,23 @@ class ClusterStateController: if service_type == StorageNode: self.wait_after_storage_startup() + # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Restart {service_type} service on {node}") - def restart_service_of_type(self, node: ClusterNode, service_type: ServiceClass): - service = node.service(service_type) - service.restart_service() + @reporter.step("Stop all storage services on cluster") + def stop_all_storage_services(self, reversed_order: bool = False): + nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + for node in nodes: + self.stop_service_of_type(node, StorageNode) + + # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Restart all {service_type} services") - def restart_services_of_type(self, service_type: 
type[ServiceClass]): - services = self.cluster.services(service_type) - parallel([service.restart_service for service in services]) + @reporter.step("Stop all S3 gates on cluster") + def stop_all_s3_gates(self, reversed_order: bool = False): + nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes - if service_type == StorageNode: - self.wait_after_storage_startup() + for node in nodes: + self.stop_service_of_type(node, S3Gate) # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @@ -274,6 +277,30 @@ class ClusterStateController: def start_storage_service(self, node: ClusterNode): self.start_service_of_type(node, StorageNode) + # TODO: Deprecated + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Start stopped storage services") + def start_stopped_storage_services(self): + self.start_stopped_services_of_type(StorageNode) + + # TODO: Deprecated + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Stop s3 gate on {node}") + def stop_s3_gate(self, node: ClusterNode, mask: bool = True): + self.stop_service_of_type(node, S3Gate, mask) + + # TODO: Deprecated + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Start s3 gate on {node}") + def start_s3_gate(self, node: ClusterNode): + self.start_service_of_type(node, S3Gate) + + # TODO: Deprecated + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Start stopped S3 gates") + def start_stopped_s3_gates(self): + self.start_stopped_services_of_type(S3Gate) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Suspend {process_name} service in {node}") def suspend_service(self, process_name: str, node: ClusterNode): @@ -363,33 +390,31 @@ class ClusterStateController: @reporter.step("Get node time") def get_node_date(self, node: ClusterNode) -> datetime: shell = node.host.get_shell() - return datetime.strptime(shell.exec('date +"%Y-%m-%d %H:%M:%S"').stdout.strip(), "%Y-%m-%d %H:%M:%S") + return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z") - @reporter.step("Set time on nodes in {in_date}") - def change_date_on_all_nodes(self, cluster: Cluster, in_date: datetime) -> None: - parallel(self.change_node_date, cluster.cluster_nodes, in_date=in_date) - - @reporter.step("Set time on {node} to {in_date}") + @reporter.step("Set node time to {in_date}") def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: shell = node.host.get_shell() - in_date_frmt = in_date.strftime("%Y-%m-%d %H:%M:%S") - shell.exec(f"timedatectl set-time '{in_date_frmt}'") + shell.exec(f"date -s @{time.mktime(in_date.timetuple())}") + shell.exec("hwclock --systohc") node_time = self.get_node_date(node) - with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): - assert (node_time - in_date).total_seconds() < 60 + assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1) - @reporter.step("Restore time on nodes") - def restore_date_on_all_nodes(self, cluster: Cluster) -> None: - parallel(self.restore_node_date, cluster.cluster_nodes) - - @reporter.step("Restore time on {node}") + @reporter.step(f"Restore time") def restore_node_date(self, node: ClusterNode) -> None: shell = node.host.get_shell() - now_time = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S") - + now_time = datetime.datetime.now(datetime.timezone.utc) with reporter.step(f"Set {now_time} time"): - shell.exec(f"timedatectl set-time 
'{now_time}'") + shell.exec(f"date -s @{time.mktime(now_time.timetuple())}") + shell.exec("hwclock --systohc") + + @reporter.step("Change the synchronizer status to {status}") + def set_sync_date_all_nodes(self, status: str): + if status == "active": + parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes) + return + parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes) @reporter.step("Set MaintenanceModeAllowed - {status}") def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: @@ -429,11 +454,9 @@ class ClusterStateController: if not checker_node: checker_node = cluster_node netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(checker_node.storage_node.get_rpc_endpoint()).stdout) - netmap = [node for node in netmap if cluster_node.get_interface(Interfaces.MGMT) == node.node] + netmap = [node for node in netmap if cluster_node.host_ip == node.node] if status == NodeStatus.OFFLINE: - assert ( - cluster_node.get_interface(Interfaces.MGMT) not in netmap - ), f"{cluster_node.get_interface(Interfaces.MGMT)} not in Offline" + assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline" else: assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'" @@ -475,6 +498,16 @@ class ClusterStateController: frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path) return frostfs_adm, frostfs_cli, frostfs_cli_remote + def _enable_date_synchronizer(self, cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + shell.exec("timedatectl set-ntp true") + cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 15) + + def _disable_date_synchronizer(self, cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + shell.exec("timedatectl set-ntp false") + cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 15) + def _get_disk_controller(self, node: StorageNode, device: str, mountpoint: str) -> DiskController: disk_controller_id = DiskController.get_id(node, device) if disk_controller_id in self.detached_disks.keys(): diff --git a/src/frostfs_testlib/storage/dataclasses/metrics.py b/src/frostfs_testlib/storage/dataclasses/metrics.py index 8969015..81e757c 100644 --- a/src/frostfs_testlib/storage/dataclasses/metrics.py +++ b/src/frostfs_testlib/storage/dataclasses/metrics.py @@ -1,9 +1,3 @@ -import time -from functools import wraps -from typing import Callable - -import pytest - from frostfs_testlib.hosting import Host from frostfs_testlib.shell.interfaces import CommandResult @@ -13,11 +7,11 @@ class Metrics: self.storage = StorageMetrics(host, metrics_endpoint) + class StorageMetrics: """ Class represents storage metrics in a cluster """ - def __init__(self, host: Host, metrics_endpoint: str) -> None: self.host = host self.metrics_endpoint = metrics_endpoint @@ -35,46 +29,8 @@ class StorageMetrics: additional_greps = " |grep ".join([grep_command for grep_command in greps.values()]) result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}") return result - + def get_all_metrics(self) -> CommandResult: shell = self.host.get_shell() result = shell.exec(f"curl -s {self.metrics_endpoint}") return result - - -def wait_until_metric_result_is_stable( - relative_deviation: float = None, absolute_deviation: int = None, max_attempts: int = 10, sleep_interval: int = 30 -): - """ - A decorator function that 
repeatedly calls the decorated function until its result stabilizes - within a specified relative tolerance or until the maximum number of attempts is reached. - - This decorator is useful for scenarios where a function returns a metric or value that may fluctuate - over time, and you want to ensure that the result has stabilized before proceeding. - """ - - def decorator(func: Callable): - @wraps(func) - def wrapper(*args, **kwargs): - last_result = None - for _ in range(max_attempts): - # first function call - first_result = func(*args, **kwargs) - - # waiting before the second call - time.sleep(sleep_interval) - - # second function call - last_result = func(*args, **kwargs) - - # checking value stability - if first_result == pytest.approx(last_result, rel=relative_deviation, abs=absolute_deviation): - return last_result - - # if stability is not achieved, return the last value - if last_result is not None: - return last_result - - return wrapper - - return decorator diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 5c8b723..180877d 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -82,9 +82,6 @@ class NodeBase(HumanReadableABC): def get_metrics_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS) - def get_pprof_endpoint(self) -> str: - return self._get_attribute(ConfigAttributes.ENDPOINT_PPROF) - def stop_service(self, mask: bool = True): if mask: with reporter.step(f"Mask {self.name} service on {self.host.config.address}"): @@ -147,13 +144,6 @@ class NodeBase(HumanReadableABC): else None ) - def get_working_dir_path(self) -> Optional[str]: - """ - Returns working directory path located on remote host - """ - config_attributes = self.host.get_service_config(self.name) - return self._get_attribute(ConfigAttributes.WORKING_DIR) if ConfigAttributes.WORKING_DIR in config_attributes.attributes else None - @property def config_dir(self) -> str: return self._get_attribute(ConfigAttributes.CONFIG_DIR) diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index 4c303fc..55a8388 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -1,9 +1,6 @@ -import re from dataclasses import dataclass from typing import Optional -from pydantic import BaseModel, Field, field_validator - from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.readable import HumanReadableEnum @@ -78,37 +75,8 @@ class NodeNetInfo: withdrawal_fee: str = None homomorphic_hashing_disabled: str = None maintenance_mode_allowed: str = None - - -class Attributes(BaseModel): - cluster_name: str = Field(alias="ClusterName") - continent: str = Field(alias="Continent") - country: str = Field(alias="Country") - country_code: str = Field(alias="CountryCode") - external_addr: list[str] = Field(alias="ExternalAddr") - location: str = Field(alias="Location") - node: str = Field(alias="Node") - subdiv: str = Field(alias="SubDiv") - subdiv_code: str = Field(alias="SubDivCode") - un_locode: str = Field(alias="UN-LOCODE") - role: str = Field(alias="role") - - @field_validator("external_addr", mode="before") - @classmethod - def convert_external_addr(cls, value: str) -> list[str]: - return [f"{ip}:{port}" for ip, port in 
re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", value)] - - -class NodeInfo(BaseModel): - public_key: str = Field(alias="publicKey") - addresses: list[str] = Field(alias="addresses") - state: str = Field(alias="state") - attributes: Attributes = Field(alias="attributes") - - @field_validator("addresses", mode="before") - @classmethod - def convert_external_addr(cls, value: str) -> list[str]: - return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", ",".join(value))] + eigen_trust_alpha: str = None + eigen_trust_iterations: str = None @dataclass diff --git a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py index d9f94b2..8cef23b 100644 --- a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py +++ b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py @@ -1,15 +1,14 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.storage.grpc_operations import implementations, interfaces, interfaces_wrapper +from frostfs_testlib.storage.grpc_operations import interfaces +from frostfs_testlib.storage.grpc_operations.implementations import container, object -class CliClientWrapper(interfaces_wrapper.GrpcClientWrapper): +class CliClientWrapper(interfaces.GrpcClientWrapper): def __init__(self, cli: FrostfsCli) -> None: self.cli = cli - self.object: interfaces.ObjectInterface = implementations.ObjectOperations(self.cli) - self.container: interfaces.ContainerInterface = implementations.ContainerOperations(self.cli) - self.netmap: interfaces.NetmapInterface = implementations.NetmapOperations(self.cli) - self.ape_manager: interfaces.ApeManagerInterface = implementations.ApeManagerOperations(self.cli) + self.object: interfaces.ObjectInterface = object.ObjectOperations(self.cli) + self.container: interfaces.ContainerInterface = container.ContainerOperations(self.cli) -class RpcClientWrapper(interfaces_wrapper.GrpcClientWrapper): +class RpcClientWrapper(interfaces.GrpcClientWrapper): pass # The next series diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py index df820fa..e69de29 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py @@ -1,5 +0,0 @@ -from .ape_manager import ApeManagerOperations -from .chunks import ChunksOperations -from .container import ContainerOperations -from .netmap import NetmapOperations -from .object import ObjectOperations diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/ape_manager.py b/src/frostfs_testlib/storage/grpc_operations/implementations/ape_manager.py deleted file mode 100644 index 070d8a6..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/ape_manager.py +++ /dev/null @@ -1,79 +0,0 @@ -from typing import Optional - -from frostfs_testlib import reporter -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT - - -class ApeManagerOperations: - def __init__(self, cli: FrostfsCli): - self.cli = cli - - @reporter.step("Add ape rule") - def add( - self, - rpc_endpoint: str, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - path: Optional[str] = None, - rule: Optional[str] | Optional[list[str]] = None, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - 
wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ): - return self.cli.ape_manager.add( - rpc_endpoint=rpc_endpoint, - chain_id=chain_id, - chain_id_hex=chain_id_hex, - path=path, - rule=rule, - target_name=target_name, - target_type=target_type, - wallet=wallet, - address=address, - timeout=timeout, - ) - - @reporter.step("Get list APE rules") - def list( - self, - rpc_endpoint: str, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ): - return self.cli.ape_manager.list( - rpc_endpoint=rpc_endpoint, - target_name=target_name, - target_type=target_type, - wallet=wallet, - address=address, - timeout=timeout, - ) - - @reporter.step("Remove APE rule") - def remove( - self, - rpc_endpoint: str, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ): - return self.cli.ape_manager.remove( - rpc_endpoint=rpc_endpoint, - chain_id=chain_id, - chain_id_hex=chain_id_hex, - target_name=target_name, - target_type=target_type, - wallet=wallet, - address=address, - timeout=timeout, - ) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py index 0d787e2..7f3161c 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -6,7 +6,7 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher -from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, Interfaces, NodeNetmapInfo +from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo from frostfs_testlib.storage.grpc_operations import interfaces from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils.cli_utils import parse_netmap_output @@ -30,7 +30,7 @@ class ChunksOperations(interfaces.ChunksInterface): result = [] for node_info in netmap: for cluster_node in cluster.cluster_nodes: - if node_info.node == cluster_node.get_interface(Interfaces.MGMT): + if node_info.node == cluster_node.host_ip: result.append(cluster_node) return result @@ -40,7 +40,7 @@ class ChunksOperations(interfaces.ChunksInterface): for node_info in netmap: if node_info.node_id in chunk.confirmed_nodes: for cluster_node in cluster.cluster_nodes: - if cluster_node.get_interface(Interfaces.MGMT) == node_info.node: + if cluster_node.host_ip == node_info.node: return (cluster_node, node_info) @wait_for_success(300, 5, fail_testcase=None) @@ -161,5 +161,5 @@ class ChunksOperations(interfaces.ChunksInterface): def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]: parse_result = json.loads(object_nodes) if parse_result.get("errors"): - raise RuntimeError(", ".join(parse_result["errors"])) + raise parse_result["errors"] return [Chunk(**chunk) for chunk in parse_result["data_objects"]] diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py 
b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py index afdf6cb..75af00c 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py @@ -1,7 +1,6 @@ import json import logging import re -from time import sleep from typing import List, Optional, Union from frostfs_testlib import reporter @@ -302,16 +301,6 @@ class ContainerOperations(interfaces.ContainerInterface): resolver: BucketContainerResolver = resolver_cls() return resolver.resolve(node, name) - @reporter.step("Wait create container, with list") - def wait_creation(self, cid: str, endpoint: str, attempts: int = 15, sleep_interval: int = 1): - for _ in range(attempts): - containers = self.list(endpoint) - if cid in containers: - return - logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue") - sleep(sleep_interval) - raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting") - def _parse_cid(self, output: str) -> str: """ Parses container ID from a given CLI output. The input string we expect: diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py deleted file mode 100644 index 76ee69a..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py +++ /dev/null @@ -1,171 +0,0 @@ -import json as module_json -from typing import List, Optional - -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.cli.netmap_parser import NetmapParser -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeInfo, NodeNetInfo, NodeNetmapInfo - -from .. import interfaces - - -class NetmapOperations(interfaces.NetmapInterface): - def __init__(self, cli: FrostfsCli) -> None: - self.cli = cli - - def epoch( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> int: - """ - Get current epoch number. - """ - output = ( - self.cli.netmap.epoch( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return int(output) - - def netinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> NodeNetInfo: - """ - Get target node info. - """ - output = ( - self.cli.netmap.netinfo( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return NetmapParser.netinfo(output) - - def nodeinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - json: bool = True, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> NodeInfo: - """ - Get target node info. 
- """ - output = ( - self.cli.netmap.nodeinfo( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - json=json, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return NetmapParser.node_info(module_json.loads(output)) - - def snapshot( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> List[NodeNetmapInfo]: - """ - Get target node info. - """ - output = ( - self.cli.netmap.snapshot( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return NetmapParser.snapshot_all_nodes(output) - - def snapshot_one_node( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> List[NodeNetmapInfo]: - """ - Get target one node info. - """ - output = ( - self.cli.netmap.snapshot( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return NetmapParser.snapshot_one_node(output, rpc_endpoint) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py index be8a470..f31f223 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py @@ -11,7 +11,6 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell.interfaces import CommandResult from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.grpc_operations import interfaces from frostfs_testlib.storage.grpc_operations.implementations.chunks import ChunksOperations from frostfs_testlib.testing.test_control import wait_for_success @@ -675,7 +674,7 @@ class ObjectOperations(interfaces.ObjectInterface): cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes - if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT) + if netmap_node.node == cluster_node.host_ip ] return object_nodes diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces.py b/src/frostfs_testlib/storage/grpc_operations/interfaces.py new file mode 100644 index 0000000..07fe52f --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces.py @@ -0,0 +1,424 @@ +from abc import ABC, abstractmethod +from typing import Any, List, Optional + +from frostfs_testlib.shell.interfaces import CommandResult +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.constants import PlacementRule +from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo +from frostfs_testlib.utils import file_utils + + +class ChunksInterface(ABC): + @abstractmethod + 
def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: + pass + + @abstractmethod + def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: + pass + + @abstractmethod + def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: + pass + + @abstractmethod + def get_all( + self, + rpc_endpoint: str, + cid: str, + oid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> list[Chunk]: + pass + + @abstractmethod + def get_parity( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + pass + + @abstractmethod + def get_first_data( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + pass + + +class ObjectInterface(ABC): + def __init__(self) -> None: + self.chunks: ChunksInterface + + @abstractmethod + def delete( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def get( + self, + cid: str, + oid: str, + endpoint: str, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> file_utils.TestFile: + pass + + @abstractmethod + def get_from_random_node( + self, + cid: str, + oid: str, + cluster: Cluster, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def hash( + self, + endpoint: str, + cid: str, + oid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + range: Optional[str] = None, + salt: Optional[str] = None, + ttl: Optional[int] = None, + session: Optional[str] = None, + hash_type: Optional[str] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def head( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + json_output: bool = True, + is_raw: bool = False, + is_direct: bool = False, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult | Any: + pass + + @abstractmethod + def lock( + self, + cid: str, + oid: str, + endpoint: str, + lifetime: Optional[int] = None, + expire_at: Optional[int] = None, + 
address: Optional[str] = None, + bearer: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def put( + self, + path: str, + cid: str, + endpoint: str, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def patch( + self, + cid: str, + oid: str, + endpoint: str, + ranges: Optional[list[str]] = None, + payloads: Optional[list[str]] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + bearer: Optional[str] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + trace: bool = False, + ) -> str: + pass + + @abstractmethod + def put_to_random_node( + self, + path: str, + cid: str, + cluster: Cluster, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def range( + self, + cid: str, + oid: str, + range_cut: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> tuple[file_utils.TestFile, bytes]: + pass + + @abstractmethod + def search( + self, + cid: str, + endpoint: str, + bearer: str = "", + oid: Optional[str] = None, + filters: Optional[dict] = None, + expected_objects_list: Optional[list] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + phy: bool = False, + root: bool = False, + timeout: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + ttl: Optional[int] = None, + ) -> List: + pass + + @abstractmethod + def nodes( + self, + cluster: Cluster, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = None, + ) -> List[ClusterNode]: + pass + + @abstractmethod + def parts( + self, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = None, + ) -> List[str]: + pass + + +class ContainerInterface(ABC): + @abstractmethod + def create( + self, + endpoint: str, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, + address: Optional[str] = None, + attributes: Optional[dict] = None, + basic_acl: Optional[str] = None, + await_mode: bool = False, + disable_timestamp: bool = False, + force: bool = False, + trace: bool = False, + name: Optional[str] = None, + nonce: Optional[str] = None, + policy: Optional[str] = None, + session: Optional[str] = None, + subnet: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> str: + """ + Create a new container and register it in the FrostFS. + It will be stored in the sidechain when the Inner Ring accepts it. 
+ """ + raise NotImplementedError("No implemethed method create") + + @abstractmethod + def delete( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + await_mode: bool = False, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + force: bool = False, + trace: bool = False, + ) -> List[str]: + """ + Delete an existing container. + Only the owner of the container has permission to remove the container. + """ + raise NotImplementedError("No implemethed method delete") + + @abstractmethod + def get( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + to: Optional[str] = None, + json_mode: bool = True, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get container field info.""" + raise NotImplementedError("No implemethed method get") + + @abstractmethod + def get_eacl( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + json_mode: bool = True, + trace: bool = False, + to: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get extended ACL table of container.""" + raise NotImplementedError("No implemethed method get-eacl") + + @abstractmethod + def list( + self, + endpoint: str, + name: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + owner: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + **params, + ) -> List[str]: + """List all created containers.""" + raise NotImplementedError("No implemethed method list") + + @abstractmethod + def nodes( + self, + endpoint: str, + cid: str, + cluster: Cluster, + address: Optional[str] = None, + ttl: Optional[int] = None, + from_file: Optional[str] = None, + trace: bool = False, + short: Optional[bool] = True, + xhdr: Optional[dict] = None, + generate_key: Optional[bool] = None, + timeout: Optional[str] = None, + ) -> List[ClusterNode]: + """Show the nodes participating in the container in the current epoch.""" + raise NotImplementedError("No implemethed method nodes") + + +class GrpcClientWrapper(ABC): + def __init__(self) -> None: + self.object: ObjectInterface + self.container: ContainerInterface diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py deleted file mode 100644 index 379bbe0..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .ape_manager import ApeManagerInterface -from .chunks import ChunksInterface -from .container import ContainerInterface -from .netmap import NetmapInterface -from .object import ObjectInterface diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/ape_manager.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/ape_manager.py deleted file mode 100644 index 5b198bc..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/ape_manager.py +++ /dev/null @@ -1,48 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Optional - -from frostfs_testlib.shell.interfaces import CommandResult - - -class ApeManagerInterface(ABC): - @abstractmethod - def add( 
- self, - rpc_endpoint: str, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - path: Optional[str] = None, - rule: Optional[str] | Optional[list[str]] = None, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - pass - - @abstractmethod - def list( - self, - rpc_endpoint: str, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - pass - - @abstractmethod - def remove( - self, - rpc_endpoint: str, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py deleted file mode 100644 index 986b938..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py +++ /dev/null @@ -1,79 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Optional - -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo - - -class ChunksInterface(ABC): - @abstractmethod - def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: - pass - - @abstractmethod - def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: - pass - - @abstractmethod - def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: - pass - - @abstractmethod - def get_all( - self, - rpc_endpoint: str, - cid: str, - oid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> list[Chunk]: - pass - - @abstractmethod - def get_parity( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> Chunk: - pass - - @abstractmethod - def get_first_data( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> Chunk: - pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py deleted file mode 100644 index 397f7b2..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py +++ 
/dev/null @@ -1,129 +0,0 @@ -from abc import ABC, abstractmethod -from typing import List, Optional - -from frostfs_testlib.storage.cluster import Cluster, ClusterNode - - -class ContainerInterface(ABC): - @abstractmethod - def create( - self, - endpoint: str, - nns_zone: Optional[str] = None, - nns_name: Optional[str] = None, - address: Optional[str] = None, - attributes: Optional[dict] = None, - basic_acl: Optional[str] = None, - await_mode: bool = False, - disable_timestamp: bool = False, - force: bool = False, - trace: bool = False, - name: Optional[str] = None, - nonce: Optional[str] = None, - policy: Optional[str] = None, - session: Optional[str] = None, - subnet: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - """ - Create a new container and register it in the FrostFS. - It will be stored in the sidechain when the Inner Ring accepts it. - """ - raise NotImplementedError("No implemethed method create") - - @abstractmethod - def delete( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - await_mode: bool = False, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - force: bool = False, - trace: bool = False, - ) -> List[str]: - """ - Delete an existing container. - Only the owner of the container has permission to remove the container. - """ - raise NotImplementedError("No implemethed method delete") - - @abstractmethod - def get( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - to: Optional[str] = None, - json_mode: bool = True, - trace: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[str]: - """Get container field info.""" - raise NotImplementedError("No implemethed method get") - - @abstractmethod - def get_eacl( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - json_mode: bool = True, - trace: bool = False, - to: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[str]: - """Get extended ACL table of container.""" - raise NotImplementedError("No implemethed method get-eacl") - - @abstractmethod - def list( - self, - endpoint: str, - name: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - owner: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - **params, - ) -> List[str]: - """List all created containers.""" - raise NotImplementedError("No implemethed method list") - - @abstractmethod - def nodes( - self, - endpoint: str, - cid: str, - cluster: Cluster, - address: Optional[str] = None, - ttl: Optional[int] = None, - from_file: Optional[str] = None, - trace: bool = False, - short: Optional[bool] = True, - xhdr: Optional[dict] = None, - generate_key: Optional[bool] = None, - timeout: Optional[str] = None, - ) -> List[ClusterNode]: - """Show the nodes participating in the container in the current epoch.""" - raise NotImplementedError("No implemethed method nodes") - - @abstractmethod - def wait_creation(self, cid: str, endpoint: str, attempts: Optional[str], sleep_interval: Optional[int]) -> None: - raise NotImplementedError("No implemented method 
wait_creation") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py deleted file mode 100644 index 3fdc98a..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py +++ /dev/null @@ -1,89 +0,0 @@ -from abc import ABC, abstractmethod -from typing import List, Optional - -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeInfo, NodeNetInfo, NodeNetmapInfo - - -class NetmapInterface(ABC): - @abstractmethod - def epoch( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = False, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> int: - """ - Get current epoch number. - """ - raise NotImplementedError("No implemethed method epoch") - - @abstractmethod - def netinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> NodeNetInfo: - """ - Get target node info. - """ - raise NotImplementedError("No implemethed method netinfo") - - @abstractmethod - def nodeinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> NodeInfo: - """ - Get target node info. - """ - raise NotImplementedError("No implemethed method nodeinfo") - - @abstractmethod - def snapshot( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[NodeNetmapInfo]: - """ - Get target node info. - """ - raise NotImplementedError("No implemethed method snapshot") - - @abstractmethod - def snapshot_one_node( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[NodeNetmapInfo]: - """ - Get target one node info. 
- """ - raise NotImplementedError("No implemethed method snapshot") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py deleted file mode 100644 index 550c461..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py +++ /dev/null @@ -1,223 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any, List, Optional - -from frostfs_testlib.shell.interfaces import CommandResult -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.utils import file_utils - -from .chunks import ChunksInterface - - -class ObjectInterface(ABC): - def __init__(self) -> None: - self.chunks: ChunksInterface - - @abstractmethod - def delete( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def get( - self, - cid: str, - oid: str, - endpoint: str, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> file_utils.TestFile: - pass - - @abstractmethod - def get_from_random_node( - self, - cid: str, - oid: str, - cluster: Cluster, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def hash( - self, - endpoint: str, - cid: str, - oid: str, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - range: Optional[str] = None, - salt: Optional[str] = None, - ttl: Optional[int] = None, - session: Optional[str] = None, - hash_type: Optional[str] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def head( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - json_output: bool = True, - is_raw: bool = False, - is_direct: bool = False, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult | Any: - pass - - @abstractmethod - def lock( - self, - cid: str, - oid: str, - endpoint: str, - lifetime: Optional[int] = None, - expire_at: Optional[int] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def put( - self, - path: str, - cid: str, - endpoint: str, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def patch( - self, - cid: str, - oid: str, - endpoint: str, - ranges: Optional[list[str]] = None, - payloads: Optional[list[str]] = None, - new_attrs: Optional[str] = None, - replace_attrs: bool = False, - bearer: Optional[str] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - trace: bool = False, - ) -> str: - pass - - @abstractmethod - def put_to_random_node( - self, - path: str, - cid: str, - cluster: Cluster, - 
bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def range( - self, - cid: str, - oid: str, - range_cut: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> tuple[file_utils.TestFile, bytes]: - pass - - @abstractmethod - def search( - self, - cid: str, - endpoint: str, - bearer: str = "", - oid: Optional[str] = None, - filters: Optional[dict] = None, - expected_objects_list: Optional[list] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - phy: bool = False, - root: bool = False, - timeout: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - ttl: Optional[int] = None, - ) -> List: - pass - - @abstractmethod - def nodes( - self, - cluster: Cluster, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = None, - ) -> List[ClusterNode]: - pass - - @abstractmethod - def parts( - self, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = None, - ) -> List[str]: - pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py b/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py deleted file mode 100644 index 5edc99f..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py +++ /dev/null @@ -1,14 +0,0 @@ -from abc import ABC - -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli - -from . 
import interfaces - - -class GrpcClientWrapper(ABC): - def __init__(self) -> None: - self.cli: FrostfsCli - self.object: interfaces.ObjectInterface - self.container: interfaces.ContainerInterface - self.netmap: interfaces.NetmapInterface - self.ape_manager: interfaces.ApeManagerInterface diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 8787296..0f9fef2 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -68,7 +68,7 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n" f"RC: {return_code}\n" - f"Start / End / Elapsed\t {start_time} / {end_time} / {end_time - start_time}" + f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {end_time - start_time}" ) with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): reporter.attach(command_attachment, "Command execution") diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index 0676085..490abb0 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -64,7 +64,7 @@ def parallel_binary_verions(host: Host) -> dict[str, str]: try: result = shell.exec(f"{binary_path} {binary['param']}") version = parse_version(result.stdout) or parse_version(result.stderr) or "Unknown" - versions_at_host[binary_name] = version.strip() + versions_at_host[binary_name] = version except Exception as exc: logger.error(f"Cannot get version for {binary_path} because of\n{exc}") versions_at_host[binary_name] = "Unknown" diff --git a/tests/test_load_config.py b/tests/test_load_config.py index fbeb587..883b1f2 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -6,7 +6,10 @@ import pytest from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType, Preset, ReadFrom from frostfs_testlib.load.runners import DefaultRunner from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME +from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController +from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode +from frostfs_testlib.storage.dataclasses.node_base import NodeBase @dataclass @@ -126,8 +129,6 @@ class TestLoadConfig: "--size '11'", "--acl 'acl'", "--preload_obj '13'", - "--retry '24'", - "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -160,8 +161,6 @@ class TestLoadConfig: expected_preset_args = [ "--size '11'", "--preload_obj '13'", - "--retry '24'", - "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -318,8 +317,6 @@ class TestLoadConfig: "--no-verify-ssl", "--size '11'", "--preload_obj '13'", - "--retry '24'", - "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -353,8 +350,6 @@ class TestLoadConfig: expected_preset_args = [ "--size '11'", "--preload_obj '13'", - "--retry '24'", - "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -420,26 +415,6 @@ class TestLoadConfig: self._check_preset_params(load_params, params) - @pytest.mark.parametrize( - "load_type, input, value, params", - [ - (LoadType.gRPC, ["A C ", " B"], ["A C", "B"], [f"--rule 'A C' --rule 'B'"]), - (LoadType.gRPC, " A 
", ["A"], ["--rule 'A'"]), - (LoadType.gRPC, " A , B ", ["A , B"], ["--rule 'A , B'"]), - (LoadType.gRPC, [" A", "B "], ["A", "B"], ["--rule 'A' --rule 'B'"]), - (LoadType.gRPC, None, None, []), - (LoadType.S3, ["A C ", " B"], ["A C", "B"], []), - (LoadType.S3, None, None, []), - ], - ) - def test_ape_list_parsing_formatter(self, load_type, input, value, params): - load_params = LoadParams(load_type) - load_params.preset = Preset() - load_params.preset.rule = input - assert load_params.preset.rule == value - - self._check_preset_params(load_params, params) - @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True) def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): expected_env_vars = { @@ -469,8 +444,6 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", - "--retry '0'", - "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -502,8 +475,6 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", - "--retry '0'", - "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -611,8 +582,6 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", - "--retry '0'", - "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -644,8 +613,6 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", - "--retry '0'", - "--rule ''", "--out ''", "--workers '0'", "--containers '0'",