diff --git a/CODEOWNERS b/CODEOWNERS index 519ca42..4a621d3 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,3 +1 @@ -.* @TrueCloudLab/qa-committers -.forgejo/.* @potyarkin -Makefile @potyarkin +* @JuliaKovshova @abereziny @d.zayakin @anikeev-yadro @anurindm @ylukoyan @i.niyazov diff --git a/pyproject.toml b/pyproject.toml index d62f04b..3faa637 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,7 @@ dependencies = [ "pytest==7.1.2", "tenacity==8.0.1", "boto3==1.35.30", - "boto3-stubs[s3,iam,sts]==1.35.30", + "boto3-stubs[essential]==1.35.30", ] requires-python = ">=3.10" @@ -62,7 +62,7 @@ authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3Credentia wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider" [project.entry-points."frostfs.testlib.bucket_cid_resolver"] -frostfs = "frostfs_testlib.clients.s3.curl_bucket_resolver:CurlBucketContainerResolver" +frostfs = "frostfs_testlib.s3.curl_bucket_resolver:CurlBucketContainerResolver" [tool.isort] profile = "black" diff --git a/requirements.txt b/requirements.txt index 56d9b83..e012366 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,8 +9,7 @@ testrail-api==1.12.0 tenacity==8.0.1 pytest==7.1.2 boto3==1.35.30 -boto3-stubs[s3,iam,sts]==1.35.30 -pydantic==2.10.6 +boto3-stubs[essential]==1.35.30 # Dev dependencies black==22.8.0 @@ -22,4 +21,4 @@ pylint==2.17.4 # Packaging dependencies build==0.8.0 setuptools==65.3.0 -twine==4.0.1 \ No newline at end of file +twine==4.0.1 diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index 4724a8b..1ceb972 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1,4 +1,4 @@ __version__ = "2.0.1" -from .fixtures import configure_testlib, hosting, session_start_time, temp_directory -from .hooks import pytest_add_frostfs_marker, pytest_collection_modifyitems +from .fixtures import configure_testlib, hosting, temp_directory +from .hooks import pytest_collection_modifyitems diff --git a/src/frostfs_testlib/cli/cli_command.py b/src/frostfs_testlib/cli/cli_command.py index 224e9e3..3600e77 100644 --- a/src/frostfs_testlib/cli/cli_command.py +++ b/src/frostfs_testlib/cli/cli_command.py @@ -1,11 +1,10 @@ from typing import Optional from frostfs_testlib.shell import CommandOptions, CommandResult, InteractiveInput, Shell -from frostfs_testlib.utils.datetime_utils import parse_time class CliCommand: - TIMEOUT_INACCURACY = 10 + WALLET_SOURCE_ERROR_MSG = "Provide either wallet or wallet_config to specify wallet location" WALLET_PASSWD_ERROR_MSG = "Provide either wallet_password or wallet_config to specify password" @@ -25,7 +24,9 @@ class CliCommand: def __init__(self, shell: Shell, cli_exec_path: str, **base_params): self.shell = shell self.cli_exec_path = cli_exec_path - self.__base_params = " ".join([f"--{param} {value}" for param, value in base_params.items() if value]) + self.__base_params = " ".join( + [f"--{param} {value}" for param, value in base_params.items() if value] + ) def _format_command(self, command: str, **params) -> str: param_str = [] @@ -47,7 +48,9 @@ class CliCommand: val_str = str(value_item).replace("'", "\\'") param_str.append(f"--{param} '{val_str}'") elif isinstance(value, dict): - param_str.append(f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'') + param_str.append( + f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'' + ) else: if "'" in str(value): value_str = str(value).replace('"', '\\"') @@ -60,22 +63,12 @@ class 
CliCommand: return f"{self.cli_exec_path} {self.__base_params} {command or ''} {param_str}" def _execute(self, command: Optional[str], **params) -> CommandResult: - if timeout := params.get("timeout"): - timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY - - return self.shell.exec( - self._format_command(command, **params), - CommandOptions(timeout=timeout), - ) + return self.shell.exec(self._format_command(command, **params)) def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult: - if timeout := params.get("timeout"): - timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY - return self.shell.exec( self._format_command(command, **params), - CommandOptions( - interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)], - timeout=timeout, + options=CommandOptions( + interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)] ), ) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index bdf4a91..5e39cf4 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -463,26 +463,3 @@ class FrostfsAdmMorph(CliCommand): "morph ape rm-rule-chain", **{param: value for param, value in locals().items() if param not in ["self"]}, ) - - def get_nns_records( - self, - name: str, - type: Optional[str] = None, - rpc_endpoint: Optional[str] = None, - alphabet_wallets: Optional[str] = None, - ) -> CommandResult: - """Returns domain record of the specified type - - Args: - name: Domain name - type: Domain name service record type (A|CNAME|SOA|TXT) - rpc_endpoint: N3 RPC node endpoint - alphabet_wallets: path to alphabet wallets dir - - Returns: - Command's result - """ - return self._execute( - "morph nns get-records", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/cli.py b/src/frostfs_testlib/cli/frostfs_cli/cli.py index 7874f18..d83b7ae 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/cli.py +++ b/src/frostfs_testlib/cli/frostfs_cli/cli.py @@ -29,7 +29,6 @@ class FrostfsCli: util: FrostfsCliUtil version: FrostfsCliVersion control: FrostfsCliControl - ape_manager: FrostfsCliApeManager def __init__(self, shell: Shell, frostfs_cli_exec_path: str, config_file: Optional[str] = None): self.accounting = FrostfsCliAccounting(shell, frostfs_cli_exec_path, config=config_file) diff --git a/src/frostfs_testlib/cli/frostfs_cli/netmap.py b/src/frostfs_testlib/cli/frostfs_cli/netmap.py index cd197d3..d219940 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/netmap.py +++ b/src/frostfs_testlib/cli/frostfs_cli/netmap.py @@ -12,7 +12,6 @@ class FrostfsCliNetmap(CliCommand): address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, - trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -43,7 +42,6 @@ class FrostfsCliNetmap(CliCommand): address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, - trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -75,7 +73,6 @@ class FrostfsCliNetmap(CliCommand): generate_key: bool = False, json: bool = False, ttl: Optional[int] = None, - trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -107,7 +104,6 @@ class FrostfsCliNetmap(CliCommand): address: Optional[str] = None,
generate_key: bool = False, ttl: Optional[int] = None, - trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index e536544..1857987 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -276,54 +276,6 @@ class FrostfsCliObject(CliCommand): **{param: value for param, value in locals().items() if param not in ["self"]}, ) - - def patch( - self, - rpc_endpoint: str, - cid: str, - oid: str, - range: list[str] = None, - payload: list[str] = None, - new_attrs: Optional[str] = None, - replace_attrs: bool = False, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - trace: bool = False, - ttl: Optional[int] = None, - wallet: Optional[str] = None, - xhdr: Optional[dict] = None, - ) -> CommandResult: - """ - PATCH an object. - - Args: - rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>') - cid: Container ID - oid: Object ID - range: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2] - payload: An array of file paths to be applied in each range - new_attrs: Attributes to be changed in the format Key1=Value1,Key2=Value2 - replace_attrs: Replace all attributes completely with new ones specified in new_attrs - address: Address of wallet account - bearer: File with signed JSON or binary encoded bearer token - generate_key: Generate new private key - session: Filepath to a JSON- or binary-encoded token of the object RANGE session - timeout: Timeout for the operation - trace: Generate trace ID and print it - ttl: TTL value in request meta header (default 2) - wallet: WIF (NEP-2) string or path to the wallet or binary key - xhdr: Dict with request X-Headers - - Returns: - Command's result. - """ - return self._execute( - "object patch", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - def range( self, rpc_endpoint: str, diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py index 68a2f54..82ea87b 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -241,21 +241,3 @@ class FrostfsCliShards(CliCommand): "control shards evacuation status", **{param: value for param, value in locals().items() if param not in ["self"]}, ) - - def detach(self, endpoint: str, address: Optional[str] = None, id: Optional[str] = None, timeout: Optional[str] = None): - """ - Detach and close the shards - - Args: - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>') - id: List of shard IDs in base58 encoding - timeout: Timeout for an operation (default 15s) - - Returns: - Command's result.
- """ - return self._execute( - "control shards detach", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py index 4b4a501..23ac4da 100644 --- a/src/frostfs_testlib/cli/netmap_parser.py +++ b/src/frostfs_testlib/cli/netmap_parser.py @@ -1,7 +1,7 @@ import re from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeInfo, NodeNetInfo, NodeNetmapInfo, NodeStatus +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo, NodeStatus class NetmapParser: @@ -20,6 +20,8 @@ class NetmapParser: "withdrawal_fee": r"Withdrawal fee: (?P\d+)", "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?Ptrue|false)", "maintenance_mode_allowed": r"Maintenance mode allowed: (?Ptrue|false)", + "eigen_trust_alpha": r"EigenTrustAlpha: (?P\d+\w+$)", + "eigen_trust_iterations": r"EigenTrustIterations: (?P\d+)", } parse_result = {} @@ -62,7 +64,7 @@ class NetmapParser: for node in netmap_nodes: for key, regex in regexes.items(): search_result = re.search(regex, node, flags=re.MULTILINE) - if search_result is None: + if search_result == None: result_netmap[key] = None continue if key == "node_data_ips": @@ -81,22 +83,9 @@ class NetmapParser: return dataclasses_netmap @staticmethod - def snapshot_one_node(output: str, rpc_endpoint: str) -> NodeNetmapInfo | None: + def snapshot_one_node(output: str, cluster_node: ClusterNode) -> NodeNetmapInfo | None: snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output) - for snapshot in snapshot_nodes: - for endpoint in snapshot.external_address: - if rpc_endpoint.split(":")[0] in endpoint: - return snapshot - - @staticmethod - def node_info(output: dict) -> NodeInfo: - data_dict = {"attributes": {}} - - for key, value in output.items(): - if key != "attributes": - data_dict[key] = value - - for attribute in output["attributes"]: - data_dict["attributes"][attribute["key"]] = attribute["value"] - - return NodeInfo(**data_dict) + snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.host_ip] + if not snapshot_node: + return None + return snapshot_node[0] diff --git a/src/frostfs_testlib/clients/__init__.py b/src/frostfs_testlib/clients/__init__.py deleted file mode 100644 index e46766b..0000000 --- a/src/frostfs_testlib/clients/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from frostfs_testlib.clients.http.http_client import HttpClient -from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient -from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper -from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper -from frostfs_testlib.clients.s3.s3_http_client import S3HttpClient diff --git a/src/frostfs_testlib/clients/http/__init__.py b/src/frostfs_testlib/clients/http/__init__.py deleted file mode 100644 index ab6e2b0..0000000 --- a/src/frostfs_testlib/clients/http/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from frostfs_testlib.clients.http.http_client import HttpClient diff --git a/src/frostfs_testlib/clients/http/http_client.py b/src/frostfs_testlib/clients/http/http_client.py deleted file mode 100644 index 16d7707..0000000 --- a/src/frostfs_testlib/clients/http/http_client.py +++ /dev/null @@ -1,152 +0,0 @@ -import io -import json -import logging -import logging.config -from typing import Mapping, Sequence - -import httpx - -from frostfs_testlib import reporter - 
-timeout = httpx.Timeout(60, read=150) -LOGGING_CONFIG = { - "disable_existing_loggers": False, - "version": 1, - "handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}}, - "formatters": { - "http": { - "format": "%(asctime)s [%(levelname)s] %(name)s - %(message)s", - "datefmt": "%Y-%m-%d %H:%M:%S", - } - }, - "loggers": { - "httpx": { - "handlers": ["default"], - "level": "ERROR", - }, - "httpcore": { - "handlers": ["default"], - "level": "ERROR", - }, - }, -} - -logging.config.dictConfig(LOGGING_CONFIG) -logger = logging.getLogger("NeoLogger") - - -class HttpClient: - @reporter.step("Send {method} request to {url}") - def send(self, method: str, url: str, expected_status_code: int = None, **kwargs: dict) -> httpx.Response: - transport = httpx.HTTPTransport(verify=False, retries=5) - client = httpx.Client(timeout=timeout, transport=transport) - response = client.request(method, url, **kwargs) - - self._attach_response(response, **kwargs) - # logger.info(f"Response: {response.status_code} => {response.text}") - - if expected_status_code: - assert ( - response.status_code == expected_status_code - ), f"Got {response.status_code} response code while {expected_status_code} expected" - - return response - - @classmethod - def _parse_body(cls, readable: httpx.Request | httpx.Response) -> str | None: - try: - content = readable.read() - except Exception as e: - logger.warning(f"Unable to read file: {str(e)}") - return None - - if not content: - return None - - request_body = None - - try: - request_body = json.loads(content) - except (json.JSONDecodeError, UnicodeDecodeError) as e: - logger.warning(f"Unable to convert body to json: {str(e)}") - - if request_body is not None: - return json.dumps(request_body, default=str, indent=4) - - try: - request_body = content.decode() - except UnicodeDecodeError as e: - logger.warning(f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}") - - request_body = content if request_body is None else request_body - request_body = "<large text data>" if len(request_body) > 1000 else request_body - - return request_body - - @classmethod - def _parse_files(cls, files: Mapping | Sequence | None) -> dict: - filepaths = {} - - if not files: - return filepaths - - if isinstance(files, Sequence): - items = files - elif isinstance(files, Mapping): - items = files.items() - else: - raise TypeError(f"'files' must be either Sequence or Mapping, got: {type(files).__name__}") - - for name, file in items: - if isinstance(file, io.IOBase): - filepaths[name] = file.name - elif isinstance(file, Sequence): - filepaths[name] = file[1].name - - return filepaths - - @classmethod - def _attach_response(cls, response: httpx.Response, **kwargs): - request = response.request - request_headers = json.dumps(dict(request.headers), default=str, indent=4) - request_body = cls._parse_body(request) - - files = kwargs.get("files") - request_files = cls._parse_files(files) - - response_headers = json.dumps(dict(response.headers), default=str, indent=4) - response_body = cls._parse_body(response) - - report = ( - f"Method: {request.method}\n\n" - + f"URL: {request.url}\n\n" - + f"Request Headers: {request_headers}\n\n" - + (f"Request Body: {request_body}\n\n" if request_body else "") - + (f"Request Files: {request_files}\n\n" if request_files else "") - + f"Response Status Code: {response.status_code}\n\n" - + f"Response Headers: {response_headers}\n\n" - + (f"Response Body: {response_body}\n\n" if response_body else "") - ) - curl_request = 
cls._create_curl_request(request.url, request.method, request.headers, request_body, request_files) - - reporter.attach(report, "Requests Info") - reporter.attach(curl_request, "CURL") - cls._write_log(curl_request, response_body, response.status_code) - - @classmethod - def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str: - excluded_headers = {"Accept-Encoding", "Connection", "User-Agent", "Content-Length"} - headers = " ".join(f"-H '{header.title()}: {value}'" for header, value in headers.items() if header.title() not in excluded_headers) - - data = f" -d '{data}'" if data else "" - for name, path in files.items(): - data += f' -F "{name}=@{path}"' - - # Option -k means no verify SSL - return f"curl {url} -X {method} {headers}{data} -k" - - @classmethod - def _write_log(cls, curl: str, res_body: str, res_code: int) -> None: - if res_body: - curl += f"\nResponse: {res_code}\n{res_body}" - logger.info(f"{curl}") diff --git a/src/frostfs_testlib/clients/s3/__init__.py b/src/frostfs_testlib/clients/s3/__init__.py deleted file mode 100644 index 5481f48..0000000 --- a/src/frostfs_testlib/clients/s3/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient -from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper -from frostfs_testlib.clients.s3.interfaces import ACL, BucketContainerResolver, S3ClientWrapper, VersioningStatus diff --git a/src/frostfs_testlib/clients/s3/s3_http_client.py b/src/frostfs_testlib/clients/s3/s3_http_client.py deleted file mode 100644 index f6f423d..0000000 --- a/src/frostfs_testlib/clients/s3/s3_http_client.py +++ /dev/null @@ -1,149 +0,0 @@ -import hashlib -import logging -import xml.etree.ElementTree as ET - -import httpx -from botocore.auth import SigV4Auth -from botocore.awsrequest import AWSRequest -from botocore.credentials import Credentials - -from frostfs_testlib import reporter -from frostfs_testlib.clients import HttpClient -from frostfs_testlib.utils.file_utils import TestFile - -logger = logging.getLogger("NeoLogger") - -DEFAULT_TIMEOUT = 60.0 - - -class S3HttpClient: - def __init__( - self, s3gate_endpoint: str, access_key_id: str, secret_access_key: str, profile: str = "default", region: str = "us-east-1" - ) -> None: - self.http_client = HttpClient() - self.credentials = Credentials(access_key_id, secret_access_key) - self.profile = profile - self.region = region - - self.iam_endpoint: str = None - self.s3gate_endpoint: str = None - self.service: str = None - self.signature: SigV4Auth = None - - self.set_endpoint(s3gate_endpoint) - - def _to_s3_header(self, header: str) -> dict: - replacement_map = { - "Acl": "ACL", - "_": "-", - } - - result = header - if not header.startswith("x_amz"): - result = header.title() - - for find, replace in replacement_map.items(): - result = result.replace(find, replace) - - return result - - def _convert_to_s3_headers(self, scope: dict, exclude: list[str] = None): - exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"] - return {self._to_s3_header(header): value for header, value in scope.items() if header not in exclude and value is not None} - - def _create_aws_request( - self, method: str, url: str, headers: dict, content: str | bytes | TestFile = None, params: dict = None - ) -> AWSRequest: - data = b"" - - if content is not None: - if isinstance(content, TestFile): - with open(content, "rb") as io_content: - data = io_content.read() - elif isinstance(content, str): - data = 
bytes(content, encoding="utf-8") - elif isinstance(content, bytes): - data = content - else: - raise TypeError(f"Content expected as a string, bytes or TestFile object, got: {content}") - - headers["X-Amz-Content-SHA256"] = hashlib.sha256(data).hexdigest() - aws_request = AWSRequest(method, url, headers, data, params) - self.signature.add_auth(aws_request) - - return aws_request - - def _exec_request( - self, - method: str, - url: str, - headers: dict, - content: str | bytes | TestFile = None, - params: dict = None, - timeout: float = DEFAULT_TIMEOUT, - ) -> dict: - aws_request = self._create_aws_request(method, url, headers, content, params) - response = self.http_client.send( - aws_request.method, - aws_request.url, - headers=dict(aws_request.headers), - data=aws_request.data, - params=aws_request.params, - timeout=timeout, - ) - - try: - response.raise_for_status() - except httpx.HTTPStatusError: - raise httpx.HTTPStatusError(response.text, request=response.request, response=response) - - root = ET.fromstring(response.read()) - data = { - "LastModified": root.find(".//LastModified").text, - "ETag": root.find(".//ETag").text, - } - - if response.headers.get("x-amz-version-id"): - data["VersionId"] = response.headers.get("x-amz-version-id") - - return data - - @reporter.step("Set endpoint S3 to {s3gate_endpoint}") - def set_endpoint(self, s3gate_endpoint: str): - if self.s3gate_endpoint == s3gate_endpoint: - return - - self.s3gate_endpoint = s3gate_endpoint - self.service = "s3" - self.signature = SigV4Auth(self.credentials, self.service, self.region) - - @reporter.step("Set endpoint IAM to {iam_endpoint}") - def set_iam_endpoint(self, iam_endpoint: str): - if self.iam_endpoint == iam_endpoint: - return - - self.iam_endpoint = iam_endpoint - self.service = "iam" - self.signature = SigV4Auth(self.credentials, self.service, self.region) - - @reporter.step("Patch object S3") - def patch_object( - self, - bucket: str, - key: str, - content: str | bytes | TestFile, - content_range: str, - version_id: str = None, - if_match: str = None, - if_unmodified_since: str = None, - x_amz_expected_bucket_owner: str = None, - timeout: float = DEFAULT_TIMEOUT, - ) -> dict: - if content_range and not content_range.startswith("bytes"): - content_range = f"bytes {content_range}/*" - - url = f"{self.s3gate_endpoint}/{bucket}/{key}" - headers = self._convert_to_s3_headers(locals(), exclude=["bucket", "key", "content", "version_id", "timeout"]) - params = {"VersionId": version_id} if version_id is not None else None - - return self._exec_request("PATCH", url, headers, content, params, timeout=timeout) diff --git a/src/frostfs_testlib/fixtures.py b/src/frostfs_testlib/fixtures.py index 7d767d2..d0f92f2 100644 --- a/src/frostfs_testlib/fixtures.py +++ b/src/frostfs_testlib/fixtures.py @@ -1,6 +1,5 @@ import logging import os -from datetime import datetime from importlib.metadata import entry_points import pytest @@ -12,12 +11,6 @@ from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE from frostfs_testlib.storage import get_service_registry -@pytest.fixture(scope="session", autouse=True) -def session_start_time(): - start_time = datetime.utcnow() - return start_time - - @pytest.fixture(scope="session") def configure_testlib(): reporter.get_reporter().register_handler(reporter.AllureHandler()) diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py index d7e4cc8..6830e78 100644 --- a/src/frostfs_testlib/hooks.py +++ b/src/frostfs_testlib/hooks.py @@ -1,8 +1,8 @@ import pytest 
-@pytest.hookimpl(specname="pytest_collection_modifyitems") -def pytest_add_frostfs_marker(items: list[pytest.Item]): +@pytest.hookimpl +def pytest_collection_modifyitems(items: list[pytest.Item]): # All tests which reside in frostfs nodeid are granted with frostfs marker, excluding # nodeid = full path of the test # 1. plugins @@ -11,21 +11,3 @@ def pytest_add_frostfs_marker(items: list[pytest.Item]): location = item.location[0] if "frostfs" in location and "plugin" not in location and "testlib" not in location: item.add_marker("frostfs") - - -# pytest hook. Do not rename -@pytest.hookimpl(trylast=True) -def pytest_collection_modifyitems(items: list[pytest.Item]): - # The order of running tests corresponds to the suites - items.sort(key=lambda item: item.location[0]) - - # Change order of tests based on @pytest.mark.order() marker - def order(item: pytest.Item) -> int: - order_marker = item.get_closest_marker("order") - if order_marker and (len(order_marker.args) != 1 or not isinstance(order_marker.args[0], int)): - raise RuntimeError("Incorrect usage of pytest.mark.order") - - order_value = order_marker.args[0] if order_marker else 0 - return order_value - - items.sort(key=lambda item: order(item)) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index d458b0a..5110e63 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -164,9 +164,6 @@ class DockerHost(Host): return volume_path - def send_signal_to_service(self, service_name: str, signal: str) -> None: - raise NotImplementedError("Not implemented for docker") - def delete_metabase(self, service_name: str) -> None: raise NotImplementedError("Not implemented for docker") @@ -250,7 +247,6 @@ class DockerHost(Host): unit: Optional[str] = None, exclude_filter: Optional[str] = None, priority: Optional[str] = None, - word_count: bool = None, ) -> str: client = self._get_docker_client() filtered_logs = "" diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index a41161c..b84326a 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -29,9 +29,6 @@ class Host(ABC): self._service_config_by_name = {service_config.name: service_config for service_config in config.services} self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis} - def __repr__(self) -> str: - return self.config.address - @property def config(self) -> HostConfig: """Returns config of the host. @@ -120,17 +117,6 @@ class Host(ABC): service_name: Name of the service to stop. """ - @abstractmethod - def send_signal_to_service(self, service_name: str, signal: str) -> None: - """Send signal to service with specified name using kill - - - The service must be hosted on this host. - - Args: - service_name: Name of the service to send the signal to. - signal: signal name. See kill -l for all names - """ - @abstractmethod def mask_service(self, service_name: str) -> None: """Prevent the service from starting by any activity by masking it. @@ -327,7 +313,6 @@ class Host(ABC): unit: Optional[str] = None, exclude_filter: Optional[str] = None, priority: Optional[str] = None, - word_count: bool = None, ) -> str: """Get logs from host filtered by regex. @@ -338,7 +323,6 @@ class Host(ABC): unit: required unit. priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher.
For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0. - word_count: output type, expected values: lines, bytes, json Returns: Found entries as str if any found. diff --git a/src/frostfs_testlib/http/__init__.py b/src/frostfs_testlib/http/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py new file mode 100644 index 0000000..261b2a6 --- /dev/null +++ b/src/frostfs_testlib/http/http_client.py @@ -0,0 +1,95 @@ +import json +import logging +import logging.config + +import httpx + +from frostfs_testlib import reporter + +timeout = httpx.Timeout(60, read=150) +LOGGING_CONFIG = { + "disable_existing_loggers": False, + "version": 1, + "handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}}, + "formatters": { + "http": { + "format": "%(levelname)s [%(asctime)s] %(name)s - %(message)s", + "datefmt": "%Y-%m-%d %H:%M:%S", + } + }, + "loggers": { + "httpx": { + "handlers": ["default"], + "level": "DEBUG", + }, + "httpcore": { + "handlers": ["default"], + "level": "ERROR", + }, + }, +} + +logging.config.dictConfig(LOGGING_CONFIG) +logger = logging.getLogger("NeoLogger") + + +class HttpClient: + @reporter.step("Send {method} request to {url}") + def send(self, method: str, url: str, expected_status_code: int = None, **kwargs: dict) -> httpx.Response: + transport = httpx.HTTPTransport(verify=False, retries=5) + client = httpx.Client(timeout=timeout, transport=transport) + response = client.request(method, url, **kwargs) + + self._attach_response(response) + logger.info(f"Response: {response.status_code} => {response.text}") + + if expected_status_code: + assert response.status_code == expected_status_code, ( + f"Got {response.status_code} response code" f" while {expected_status_code} expected" + ) + + return response + + def _attach_response(self, response: httpx.Response): + request = response.request + + try: + request_headers = json.dumps(dict(request.headers), indent=4) + except json.JSONDecodeError: + request_headers = str(request.headers) + + try: + request_body = request.read() + try: + request_body = request_body.decode("utf-8") + except UnicodeDecodeError as e: + request_body = f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}" + except Exception as e: + request_body = f"Error reading request body: {str(e)}" + + request_body = "" if request_body is None else request_body + + try: + response_headers = json.dumps(dict(response.headers), indent=4) + except json.JSONDecodeError: + response_headers = str(response.headers) + + report = ( + f"Method: {request.method}\n\n" + f"URL: {request.url}\n\n" + f"Request Headers: {request_headers}\n\n" + f"Request Body: {request_body}\n\n" + f"Response Status Code: {response.status_code}\n\n" + f"Response Headers: {response_headers}\n\n" + f"Response Body: {response.text}\n\n" + ) + curl_request = self._create_curl_request(request.url, request.method, request.headers, request_body) + + reporter.attach(report, "Requests Info") + reporter.attach(curl_request, "CURL") + + def _create_curl_request(self, url: str, method: str, headers: httpx.Headers, data: str) -> str: + headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items()) + data = f" -d '{data}'" if data else "" + # Option -k means no verify SSL + return f"curl {url} -X {method} {headers}{data} -k" diff --git a/src/frostfs_testlib/load/load_config.py 
b/src/frostfs_testlib/load/load_config.py index 3830203..15103e0 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -182,10 +182,8 @@ class Preset(MetaConfig): pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False) # Workers count for preset workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False) - # TODO: Deprecated. Acl for container/buckets + # Acl for container/buckets acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False) - # APE rule for containers instead of deprecated ACL - rule: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "rule", None, False, formatter=force_list) # ------ GRPC ------ # Amount of containers which should be created diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py index 071675a..5624940 100644 --- a/src/frostfs_testlib/processes/remote_process.py +++ b/src/frostfs_testlib/processes/remote_process.py @@ -193,7 +193,7 @@ class RemoteProcess: ) if "No such file or directory" in terminal.stderr: return None - elif terminal.return_code != 0: + elif terminal.stderr or terminal.return_code != 0: raise AssertionError(f"cat process {file} was not successful: {terminal.stderr}") return terminal.stdout diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index 53bcfaa..1c93b12 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -53,4 +53,3 @@ HOSTING_CONFIG_FILE = os.getenv( ) MORE_LOG = os.getenv("MORE_LOG", "1") -EXPIRATION_EPOCH_ATTRIBUTE = "__SYSTEM__EXPIRATION_EPOCH" diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 15e2977..3ba5f13 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -1,6 +1,5 @@ # Regex patterns of status codes of Container service CONTAINER_NOT_FOUND = "code = 3072.*message = container not found" -SUBJECT_NOT_FOUND = "code = 1024.*message =.*chain/client.*subject not found.*" # Regex patterns of status codes of Object service MALFORMED_REQUEST = "code = 1024.*message = malformed request" @@ -10,7 +9,6 @@ OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed" SESSION_NOT_FOUND = "code = 4096.*message = session token not found" OUT_OF_RANGE = "code = 2053.*message = out of range" EXPIRED_SESSION_TOKEN = "code = 4097.*message = expired session token" -ADD_CHAIN_ERROR = "code = 5120 message = apemanager access denied" # TODO: Change to codes with message # OBJECT_IS_LOCKED = "code = 2050.*message = object is locked" # LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." will be available once 2092 is fixed @@ -29,10 +27,6 @@ S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs" S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema." RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied" -# Errors from node missing reasons if request was forwarded. 
Commenting for now -# RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied" -RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request" +RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied" NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound" -# Errors from node missing reasons if request was forwarded. Commenting for now -# NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound" -NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request" +NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound" diff --git a/src/frostfs_testlib/resources/optionals.py b/src/frostfs_testlib/resources/optionals.py index 6caf158..2a7ff22 100644 --- a/src/frostfs_testlib/resources/optionals.py +++ b/src/frostfs_testlib/resources/optionals.py @@ -16,10 +16,11 @@ OPTIONAL_NODE_UNDER_LOAD = os.getenv("OPTIONAL_NODE_UNDER_LOAD") OPTIONAL_FAILOVER_ENABLED = str_to_bool(os.getenv("OPTIONAL_FAILOVER_ENABLED", "true")) # Set this to True to disable background load. I.e. a node which is supposed to be stopped will not actually be stopped. -OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true")) +OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool( + os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true") +) # Set this to False to disable autouse fixtures like node healthcheck during development. -OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true")) - -# Use cache for fixtures with @cached_fixture decorator -OPTIONAL_CACHE_FIXTURES = str_to_bool(os.getenv("OPTIONAL_CACHE_FIXTURES", "false")) +OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool( + os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true") +) diff --git a/src/frostfs_testlib/s3/__init__.py b/src/frostfs_testlib/s3/__init__.py new file mode 100644 index 0000000..32426c2 --- /dev/null +++ b/src/frostfs_testlib/s3/__init__.py @@ -0,0 +1,3 @@ +from frostfs_testlib.s3.aws_cli_client import AwsCliClient +from frostfs_testlib.s3.boto3_client import Boto3ClientWrapper +from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus diff --git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py similarity index 90% rename from src/frostfs_testlib/clients/s3/aws_cli_client.py rename to src/frostfs_testlib/s3/aws_cli_client.py index c1dd6b6..ff4e329 100644 --- a/src/frostfs_testlib/clients/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -6,8 +6,8 @@ from time import sleep from typing import Literal, Optional, Union from frostfs_testlib import reporter -from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME +from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.shell import CommandOptions from frostfs_testlib.shell.local_shell import LocalShell from frostfs_testlib.utils import string_utils @@ -33,14 +33,12 @@ class AwsCliClient(S3ClientWrapper): self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = 
"us-east-1" ) -> None: self.s3gate_endpoint = s3gate_endpoint - self.iam_endpoint = None - self.access_key_id: str = access_key_id self.secret_access_key: str = secret_access_key self.profile = profile - self.region = region - self.local_shell = LocalShell() + self.region = region + self.iam_endpoint = None try: _configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key, region) self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}") @@ -173,7 +171,7 @@ class AwsCliClient(S3ClientWrapper): return response.get("TagSet") @reporter.step("Get bucket acl") - def get_bucket_acl(self, bucket: str) -> dict: + def get_bucket_acl(self, bucket: str) -> list: if bucket.startswith("-") or " " in bucket: bucket = f'"{bucket}"' @@ -181,7 +179,8 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout - return self._to_json(output) + response = self._to_json(output) + return response.get("Grants") @reporter.step("Get bucket location") def get_bucket_location(self, bucket: str) -> dict: @@ -197,20 +196,11 @@ class AwsCliClient(S3ClientWrapper): return response.get("LocationConstraint") @reporter.step("List objects S3") - def list_objects( - self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None - ) -> Union[dict, list[str]]: + def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: if bucket.startswith("-") or " " in bucket: bucket = f'"{bucket}"' - cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} " - if page_size: - cmd = cmd.replace("--no-paginate", "") - cmd += f" --page-size {page_size} " - if prefix: - cmd += f" --prefix {prefix}" - if self.profile: - cmd += f" --profile {self.profile} " + cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -862,7 +852,7 @@ class AwsCliClient(S3ClientWrapper): return response["Parts"] @reporter.step("Complete multipart upload S3") - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: if bucket.startswith("-") or " " in bucket: bucket = f'"{bucket}"' @@ -959,15 +949,6 @@ class AwsCliClient(S3ClientWrapper): return json_output - @reporter.step("Create presign url for the object") - def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: - # AWS CLI does not support method definition and world only in 'get_object' state by default - cmd = f"aws {self.common_flags} s3 presign s3://{bucket}/{key} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - if expires_in: - cmd += f" --expires-in {expires_in}" - response = self.local_shell.exec(cmd).stdout - return response.strip() - # IAM METHODS # # Some methods don't have checks because AWS is silent in some cases (delete, attach, etc.) 
@@ -988,7 +969,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @@ -999,7 +980,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @@ -1131,7 +1112,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @@ -1142,7 +1123,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @@ -1238,7 +1219,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" + assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" return response @@ -1250,7 +1231,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" + assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" return response @@ -1275,7 +1256,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" + assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" return response @@ -1287,7 +1268,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" + assert response.get("Groups"), f"Expected Groups in response:\n{response}" return response @@ -1299,7 +1280,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" + assert response.get("Groups"), f"Expected Groups in response:\n{response}" return response @@ -1335,7 +1316,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" + assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" return response @@ -1361,7 +1342,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @@ -1376,7 +1357,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @@ -1459,90 +1440,3 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response 
- - # MFA METHODS - @reporter.step("Creates a new virtual MFA device") - def iam_create_virtual_mfa_device(self, virtual_mfa_device_name: str, outfile: str, bootstrap_method: str) -> tuple: - cmd = f"aws {self.common_flags} iam create-virtual-mfa-device --virtual-mfa-device-name {virtual_mfa_device_name}\ - --outfile {outfile} --bootstrap-method {bootstrap_method} --endpoint {self.iam_endpoint}" - - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber") - assert serial_number, f"Expected SerialNumber in response:\n{response}" - - return serial_number, False - - @reporter.step("Deactivates the specified MFA device and removes it from association with the user name") - def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: - cmd = f"aws {self.common_flags} iam deactivate-mfa-device --user-name {user_name} --serial-number {serial_number} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Deletes a virtual MFA device") - def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: - cmd = f"aws {self.common_flags} iam delete-virtual-mfa-device --serial-number {serial_number} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Enables the specified MFA device and associates it with the specified IAM user") - def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: - cmd = f"aws {self.common_flags} iam enable-mfa-device --user-name {user_name} --serial-number {serial_number} --authentication-code1 {authentication_code1}\ - --authentication-code2 {authentication_code2} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Lists the MFA devices for an IAM user") - def iam_list_virtual_mfa_devices(self) -> dict: - cmd = f"aws {self.common_flags} iam list-virtual-mfa-devices --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}" - - return response - - @reporter.step("Get session token for user") - def sts_get_session_token( - self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None - ) -> tuple: - cmd = f"aws {self.common_flags} sts get-session-token --endpoint {self.iam_endpoint}" - if duration_seconds: - cmd += f" --duration-seconds {duration_seconds}" - if serial_number: - cmd += f" --serial-number {serial_number}" - if token_code: - cmd += f" --token-code {token_code}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - access_key = response.get("Credentials", {}).get("AccessKeyId") - secret_access_key = response.get("Credentials", {}).get("SecretAccessKey") - session_token = response.get("Credentials", 
{}).get("SessionToken") - assert access_key, f"Expected AccessKeyId in response:\n{response}" - assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" - assert session_token, f"Expected SessionToken in response:\n{response}" - - return access_key, secret_access_key, session_token diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py similarity index 89% rename from src/frostfs_testlib/clients/s3/boto3_client.py rename to src/frostfs_testlib/s3/boto3_client.py index ac4d55b..91d8c5a 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -10,13 +10,11 @@ import boto3 import urllib3 from botocore.config import Config from botocore.exceptions import ClientError -from mypy_boto3_iam import IAMClient from mypy_boto3_s3 import S3Client -from mypy_boto3_sts import STSClient from frostfs_testlib import reporter -from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME +from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.utils import string_utils # TODO: Refactor this code to use shell instead of _cmd_run @@ -37,25 +35,23 @@ class Boto3ClientWrapper(S3ClientWrapper): def __init__( self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" ) -> None: - self.s3gate_endpoint: str = "" self.boto3_client: S3Client = None + self.s3gate_endpoint: str = "" + self.boto3_iam_client: S3Client = None self.iam_endpoint: str = "" - self.boto3_iam_client: IAMClient = None - self.boto3_sts_client: STSClient = None - self.access_key_id = access_key_id - self.secret_access_key = secret_access_key + self.access_key_id: str = access_key_id + self.secret_access_key: str = secret_access_key self.profile = profile self.region = region self.session = boto3.Session() self.config = Config( - signature_version="s3v4", retries={ "max_attempts": MAX_REQUEST_ATTEMPTS, "mode": RETRY_MODE, - }, + } ) self.set_endpoint(s3gate_endpoint) @@ -88,19 +84,9 @@ class Boto3ClientWrapper(S3ClientWrapper): service_name="iam", aws_access_key_id=self.access_key_id, aws_secret_access_key=self.secret_access_key, - region_name=self.region, endpoint_url=self.iam_endpoint, verify=False, ) - # since the STS does not have an endpoint, IAM is used - self.boto3_sts_client = self.session.client( - service_name="sts", - aws_access_key_id=self.access_key_id, - aws_secret_access_key=self.secret_access_key, - endpoint_url=iam_endpoint, - region_name=self.region, - verify=False, - ) def _to_s3_param(self, param: str) -> str: replacement_map = { @@ -148,7 +134,6 @@ class Boto3ClientWrapper(S3ClientWrapper): params = {"Bucket": bucket} if object_lock_enabled_for_bucket is not None: params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket}) - if acl is not None: params.update({"ACL": acl}) elif grant_write or grant_read or grant_full_control: @@ -158,7 +143,6 @@ class Boto3ClientWrapper(S3ClientWrapper): params.update({"GrantRead": grant_read}) elif grant_full_control: params.update({"GrantFullControl": grant_full_control}) - if location_constraint: params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) @@ -235,13 +219,14 @@ class Boto3ClientWrapper(S3ClientWrapper): return response.get("TagSet") 
@reporter.step("Get bucket acl") - def get_bucket_acl(self, bucket: str) -> dict: - return self._exec_request( + def get_bucket_acl(self, bucket: str) -> list: + response = self._exec_request( self.boto3_client.get_bucket_acl, params={"Bucket": bucket}, endpoint=self.s3gate_endpoint, profile=self.profile, ) + return response.get("Grants") @reporter.step("Delete bucket tagging") def delete_bucket_tagging(self, bucket: str) -> None: @@ -403,17 +388,10 @@ class Boto3ClientWrapper(S3ClientWrapper): return response if full_output else obj_list @reporter.step("List objects S3") - def list_objects( - self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None - ) -> Union[dict, list[str]]: - params = {"Bucket": bucket} - if page_size: - params["MaxKeys"] = page_size - if prefix: - params["Prefix"] = prefix + def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: response = self._exec_request( self.boto3_client.list_objects, - params, + params={"Bucket": bucket}, endpoint=self.s3gate_endpoint, profile=self.profile, ) @@ -709,7 +687,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response["Parts"] @reporter.step("Complete multipart upload S3") - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] params = self._convert_to_s3_params(locals(), exclude=["parts"]) params["MultipartUpload"] = {"Parts": parts} @@ -778,7 +756,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response.get("TagSet") @reporter.step("Delete object tagging") - def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: + def delete_object_tagging(self, bucket: str, key: str) -> None: params = self._convert_to_s3_params(locals()) self._exec_request( self.boto3_client.delete_object_tagging, @@ -821,16 +799,6 @@ class Boto3ClientWrapper(S3ClientWrapper): ) -> dict: raise NotImplementedError("Cp is not supported for boto3 client") - @reporter.step("Create presign url for the object") - def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: - response = self._exec_request( - method=self.boto3_client.generate_presigned_url, - params={"ClientMethod": method, "Params": {"Bucket": bucket, "Key": key}, "ExpiresIn": expires_in}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response - # END OBJECT METHODS # # IAM METHODS # @@ -855,7 +823,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Attaches the specified managed policy to the specified user") @@ -867,7 +835,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") @@ -998,7 +966,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Removes the specified managed policy from the specified user") @@ -1010,7 +978,7 @@ class Boto3ClientWrapper(S3ClientWrapper): 
endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Returns a list of IAM users that are in the specified IAM group") @@ -1106,7 +1074,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" + assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" return response @reporter.step("Lists all managed policies that are attached to the specified IAM user") @@ -1117,7 +1085,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" + assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" return response @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") @@ -1142,7 +1110,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" + assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" return response @reporter.step("Lists the IAM groups") @@ -1152,7 +1120,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" + assert response.get("Groups"), f"Expected Groups in response:\n{response}" return response @reporter.step("Lists the IAM groups that the specified IAM user belongs to") @@ -1163,7 +1131,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" + assert response.get("Groups"), f"Expected Groups in response:\n{response}" return response @reporter.step("Lists all the managed policies that are available in your AWS account") @@ -1195,7 +1163,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" + assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" return response @reporter.step("Lists the IAM users") @@ -1220,7 +1188,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") @@ -1235,7 +1203,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 14) + sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Removes the specified user from the specified group") @@ -1297,66 +1265,3 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - - # MFA methods - @reporter.step("Creates a new virtual MFA device") - def iam_create_virtual_mfa_device( - self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None - ) -> tuple: - response = self.boto3_iam_client.create_virtual_mfa_device(VirtualMFADeviceName=virtual_mfa_device_name) - - 
serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber") - base32StringSeed = response.get("VirtualMFADevice", {}).get("Base32StringSeed") - assert serial_number, f"Expected SerialNumber in response:\n{response}" - assert base32StringSeed, f"Expected Base32StringSeed in response:\n{response}" - - return serial_number, base32StringSeed - - @reporter.step("Deactivates the specified MFA device and removes it from association with the user name") - def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: - response = self.boto3_iam_client.deactivate_mfa_device(UserName=user_name, SerialNumber=serial_number) - - return response - - @reporter.step("Deletes a virtual MFA device") - def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: - response = self.boto3_iam_client.delete_virtual_mfa_device(SerialNumber=serial_number) - - return response - - @reporter.step("Enables the specified MFA device and associates it with the specified IAM user") - def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: - response = self.boto3_iam_client.enable_mfa_device( - UserName=user_name, - SerialNumber=serial_number, - AuthenticationCode1=authentication_code1, - AuthenticationCode2=authentication_code2, - ) - - return response - - @reporter.step("Lists the MFA devices for an IAM user") - def iam_list_virtual_mfa_devices(self) -> dict: - response = self.boto3_iam_client.list_virtual_mfa_devices() - assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}" - - return response - - @reporter.step("Get session token for user") - def sts_get_session_token( - self, duration_seconds: Optional[str] = "", serial_number: Optional[str] = "", token_code: Optional[str] = "" - ) -> tuple: - response = self.boto3_sts_client.get_session_token( - DurationSeconds=duration_seconds, - SerialNumber=serial_number, - TokenCode=token_code, - ) - - access_key = response.get("Credentials", {}).get("AccessKeyId") - secret_access_key = response.get("Credentials", {}).get("SecretAccessKey") - session_token = response.get("Credentials", {}).get("SessionToken") - assert access_key, f"Expected AccessKeyId in response:\n{response}" - assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" - assert session_token, f"Expected SessionToken in response:\n{response}" - - return access_key, secret_access_key, session_token diff --git a/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py b/src/frostfs_testlib/s3/curl_bucket_resolver.py similarity index 88% rename from src/frostfs_testlib/clients/s3/curl_bucket_resolver.py rename to src/frostfs_testlib/s3/curl_bucket_resolver.py index 4d845cf..b713e79 100644 --- a/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py +++ b/src/frostfs_testlib/s3/curl_bucket_resolver.py @@ -1,7 +1,7 @@ import re from frostfs_testlib.cli.generic_cli import GenericCli -from frostfs_testlib.clients.s3 import BucketContainerResolver +from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.storage.cluster import ClusterNode diff --git a/src/frostfs_testlib/clients/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py similarity index 90% rename from src/frostfs_testlib/clients/s3/interfaces.py rename to src/frostfs_testlib/s3/interfaces.py index 0d03a28..c084484 100644 --- a/src/frostfs_testlib/clients/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -22,15 +22,15 @@ class 
VersioningStatus(HumanReadableEnum): SUSPENDED = "Suspended" -class ACL: - PRIVATE = "private" - PUBLIC_READ = "public-read" - PUBLIC_READ_WRITE = "public-read-write" - AUTHENTICATED_READ = "authenticated-read" - AWS_EXEC_READ = "aws-exec-read" - BUCKET_OWNER_READ = "bucket-owner-read" - BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control" - LOG_DELIVERY_WRITE = "log-delivery-write" +ACL_COPY = [ + "private", + "public-read", + "public-read-write", + "authenticated-read", + "aws-exec-read", + "bucket-owner-read", + "bucket-owner-full-control", +] class BucketContainerResolver(ABC): @@ -50,14 +50,6 @@ class BucketContainerResolver(ABC): class S3ClientWrapper(HumanReadableABC): - access_key_id: str - secret_access_key: str - profile: str - region: str - - s3gate_endpoint: str - iam_endpoint: str - @abstractmethod def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None: pass @@ -136,7 +128,7 @@ class S3ClientWrapper(HumanReadableABC): """Deletes the tags from the bucket.""" @abstractmethod - def get_bucket_acl(self, bucket: str) -> dict: + def get_bucket_acl(self, bucket: str) -> list: """This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket.""" @abstractmethod @@ -203,9 +195,7 @@ class S3ClientWrapper(HumanReadableABC): """ @abstractmethod - def list_objects( - self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None - ) -> Union[dict, list[str]]: + def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: """Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. 
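For context on the get_bucket_acl change above, a minimal sketch (not part of this patch) of the underlying boto3 call, assuming a client whose endpoint and credentials are already configured elsewhere; GetBucketAcl responds with a dict holding "Owner" and "Grants", and the wrapper now surfaces only the "Grants" list:

import boto3

s3 = boto3.client("s3")  # endpoint/credentials assumed configured; illustrative only

def get_bucket_grants(bucket: str) -> list:
    # GetBucketAcl returns {"Owner": {...}, "Grants": [...]};
    # callers of the wrapper only ever consume the grant list.
    response = s3.get_bucket_acl(Bucket=bucket)
    return response.get("Grants", [])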
Make sure to design your application @@ -344,7 +334,7 @@ class S3ClientWrapper(HumanReadableABC): """Lists the parts that have been uploaded for a specific multipart upload.""" @abstractmethod - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: """Completes a multipart upload by assembling previously uploaded parts.""" @abstractmethod @@ -377,7 +367,7 @@ class S3ClientWrapper(HumanReadableABC): """Returns the tag-set of an object.""" @abstractmethod - def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: + def delete_object_tagging(self, bucket: str, key: str) -> None: """Removes the entire tag set from the specified object.""" @abstractmethod @@ -425,10 +415,6 @@ class S3ClientWrapper(HumanReadableABC): ) -> dict: """cp directory TODO: Add proper description""" - @abstractmethod - def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: - """Creates presign URL""" - # END OF OBJECT METHODS # # IAM METHODS # @@ -592,32 +578,3 @@ class S3ClientWrapper(HumanReadableABC): @abstractmethod def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: """Removes the specified tags from the user""" - - # MFA methods - @abstractmethod - def iam_create_virtual_mfa_device( - self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None - ) -> tuple: - """Creates a new virtual MFA device""" - - @abstractmethod - def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: - """Deactivates the specified MFA device and removes it from association with the user name""" - - @abstractmethod - def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: - """Deletes a virtual MFA device""" - - @abstractmethod - def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: - """Enables the specified MFA device and associates it with the specified IAM user""" - - @abstractmethod - def iam_list_virtual_mfa_devices(self) -> dict: - """Lists the MFA devices for an IAM user""" - - @abstractmethod - def sts_get_session_token( - self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None - ) -> tuple: - """Get session token for user""" diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index c0f3b06..746070f 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -141,6 +141,6 @@ class LocalShell(Shell): f"RETCODE: {result.return_code}\n\n" f"STDOUT:\n{result.stdout}\n" f"STDERR:\n{result.stderr}\n" - f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}" + f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" ) reporter.attach(command_attachment, "Command execution.txt") diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index 3f13dca..e718b4d 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -68,7 +68,8 @@ class SshConnectionProvider: try: if creds.ssh_key_path: logger.info( - f"Trying to connect to host {host} as {creds.ssh_login} using SSH key " f"{creds.ssh_key_path} (attempt {attempt})" + f"Trying to connect to host {host} as {creds.ssh_login} using 
SSH key " + f"{creds.ssh_key_path} (attempt {attempt})" ) connection.connect( hostname=host, @@ -78,7 +79,9 @@ class SshConnectionProvider: timeout=self.CONNECTION_TIMEOUT, ) else: - logger.info(f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})") + logger.info( + f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})" + ) connection.connect( hostname=host, port=port, @@ -101,7 +104,9 @@ class SshConnectionProvider: connection.close() can_retry = attempt + 1 < self.SSH_CONNECTION_ATTEMPTS if can_retry: - logger.warn(f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}") + logger.warn( + f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}" + ) sleep(self.SSH_ATTEMPTS_INTERVAL) continue logger.exception(f"Can't connect to host {host}") @@ -134,7 +139,7 @@ def log_command(func): f"RC:\n {result.return_code}\n" f"STDOUT:\n{textwrap.indent(result.stdout, ' ')}\n" f"STDERR:\n{textwrap.indent(result.stderr, ' ')}\n" - f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}" + f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" ) if not options.no_log: @@ -180,11 +185,13 @@ class SSHShell(Shell): private_key_passphrase: Optional[str] = None, port: str = "22", command_inspectors: Optional[list[CommandInspector]] = None, - custom_environment: Optional[dict] = None, + custom_environment: Optional[dict] = None ) -> None: super().__init__() self.connection_provider = SshConnectionProvider() - self.connection_provider.store_creds(host, SshCredentials(login, password, private_key_path, private_key_passphrase)) + self.connection_provider.store_creds( + host, SshCredentials(login, password, private_key_path, private_key_passphrase) + ) self.host = host self.port = port @@ -213,7 +220,9 @@ class SSHShell(Shell): result = self._exec_non_interactive(command, options) if options.check and result.return_code != 0: - raise RuntimeError(f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n") + raise RuntimeError( + f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n" + ) return result @log_command diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index 092b1a3..809b39a 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -7,7 +7,9 @@ from typing import Optional, Union from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli +from frostfs_testlib.plugins import load_plugin from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC +from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node from frostfs_testlib.storage.cluster import Cluster, ClusterNode @@ -109,8 +111,6 @@ def create_container( options: Optional[dict] = None, await_mode: bool = True, wait_for_creation: bool = True, - nns_zone: Optional[str] = None, - nns_name: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> str: """ @@ -143,8 +143,6 @@ def create_container( result = cli.container.create( rpc_endpoint=endpoint, policy=rule, - nns_name=nns_name, - nns_zone=nns_zone, basic_acl=basic_acl, attributes=attributes, name=name, diff --git 
a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 7f8391d..f28de06 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -12,7 +12,6 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell import Shell from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import wait_for_success from frostfs_testlib.utils import json_utils @@ -753,10 +752,7 @@ def get_object_nodes( ] object_nodes = [ - cluster_node - for netmap_node in netmap_nodes - for cluster_node in cluster.cluster_nodes - if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT) + cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip ] return object_nodes diff --git a/src/frostfs_testlib/steps/http/__init__.py b/src/frostfs_testlib/steps/http/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/frostfs_testlib/steps/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py similarity index 90% rename from src/frostfs_testlib/steps/http_gate.py rename to src/frostfs_testlib/steps/http/http_gate.py index aa4abf2..117cded 100644 --- a/src/frostfs_testlib/steps/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -12,8 +12,8 @@ import requests from frostfs_testlib import reporter from frostfs_testlib.cli import GenericCli -from frostfs_testlib.clients.s3.aws_cli_client import command_options from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE +from frostfs_testlib.s3.aws_cli_client import command_options from frostfs_testlib.shell import Shell from frostfs_testlib.shell.local_shell import LocalShell from frostfs_testlib.steps.cli.object import get_object @@ -33,43 +33,39 @@ def get_via_http_gate( oid: str, node: ClusterNode, request_path: Optional[str] = None, - presigned_url: Optional[str] = None, timeout: Optional[int] = 300, ): """ This function gets the given object from the HTTP gate cid: container id to get object from - oid: object id / object key + oid: object ID node: node to make request request_path: (optional) http request, if omitted - use default [{endpoint}/get/{cid}/{oid}] """ - request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" - if request_path: + # if `request_path` parameter omitted, use default + if request_path is None: + request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" + else: request = f"{node.http_gate.get_endpoint()}{request_path}" - if presigned_url: - request = presigned_url + resp = requests.get(request, stream=True, timeout=timeout, verify=False) - response = requests.get(request, stream=True, timeout=timeout, verify=False) - - if not response.ok: + if not resp.ok: raise Exception( f"""Failed to get object via HTTP gate: - request: {response.request.path_url}, - response: {response.text}, - headers: {response.headers}, - status code: {response.status_code} {response.reason}""" + request: {resp.request.path_url}, + response: {resp.text}, + headers: {resp.headers}, + status code: {resp.status_code} {resp.reason}""" ) logger.info(f"Request: {request}") - _attach_allure_step(request, response.status_code) + _attach_allure_step(request, resp.status_code) test_file =
TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")) with open(test_file, "wb") as file: - for chunk in response.iter_content(chunk_size=8192): - file.write(chunk) - + shutil.copyfileobj(resp.raw, file) return test_file @@ -121,12 +117,12 @@ def get_via_http_gate_by_attribute( endpoint: http gate endpoint request_path: (optional) http request path, if omitted - use default [{endpoint}/get_by_attribute/{Key}/{Value}] """ - attr_name = list(attribute.keys())[0] attr_value = quote_plus(str(attribute.get(attr_name))) - - request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" - if request_path: + # if `request_path` parameter omitted, use default + if request_path is None: + request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" + else: request = f"{node.http_gate.get_endpoint()}{request_path}" resp = requests.get(request, stream=True, timeout=timeout, verify=False) @@ -361,9 +357,19 @@ def try_to_get_object_via_passed_request_and_expect_error( ) -> None: try: if attrs is None: - get_via_http_gate(cid, oid, node, http_request_path) + get_via_http_gate( + cid=cid, + oid=oid, + node=node, + request_path=http_request_path, + ) else: - get_via_http_gate_by_attribute(cid, attrs, node, http_request_path) + get_via_http_gate_by_attribute( + cid=cid, + attribute=attrs, + node=node, + request_path=http_request_path, + ) raise AssertionError(f"Expected error on getting object with cid: {cid}") except Exception as err: match = error_pattern.casefold() in str(err).casefold() diff --git a/src/frostfs_testlib/steps/metrics.py b/src/frostfs_testlib/steps/metrics.py index 0d0950a..a9e545a 100644 --- a/src/frostfs_testlib/steps/metrics.py +++ b/src/frostfs_testlib/steps/metrics.py @@ -6,7 +6,7 @@ from frostfs_testlib.testing.test_control import wait_for_success @reporter.step("Check metrics result") -@wait_for_success(max_wait_time=300, interval=10) +@wait_for_success(interval=10) def check_metrics_counter( cluster_nodes: list[ClusterNode], operator: str = "==", @@ -19,7 +19,7 @@ def check_metrics_counter( counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps) assert eval( f"{counter_act} {operator} {counter_exp}" - ), f"Actual: {counter_act} {operator} Expected: {counter_exp} in nodes: {cluster_nodes}" + ), f"Expected: {counter_exp} {operator} Actual: {counter_act} in nodes: {cluster_nodes}" @reporter.step("Get metrics value from node: {node}") diff --git a/src/frostfs_testlib/steps/network.py b/src/frostfs_testlib/steps/network.py index 6bde2f1..efaaf5a 100644 --- a/src/frostfs_testlib/steps/network.py +++ b/src/frostfs_testlib/steps/network.py @@ -4,18 +4,16 @@ from frostfs_testlib.storage.cluster import ClusterNode class IpHelper: @staticmethod - def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[tuple]) -> None: + def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None: shell = node.host.get_shell() - for ip, table in block_ip: - if not table: - shell.exec(f"ip r a blackhole {ip}") - continue - shell.exec(f"ip r a blackhole {ip} table {table}") + for ip in block_ip: + shell.exec(f"ip route add blackhole {ip}") @staticmethod def restore_input_traffic_to_node(node: ClusterNode) -> None: shell = node.host.get_shell() - unlock_ip = shell.exec("ip r l table all | grep blackhole", CommandOptions(check=False)).stdout - - for active_blackhole in unlock_ip.strip().split("\n"): - shell.exec(f"ip r d {active_blackhole}") + unlock_ip
= shell.exec("ip route list | grep blackhole", CommandOptions(check=False)) + if unlock_ip.return_code != 0: + return + for ip in unlock_ip.stdout.strip().split("\n"): + shell.exec(f"ip route del blackhole {ip.split(' ')[1]}") diff --git a/src/frostfs_testlib/steps/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py similarity index 89% rename from src/frostfs_testlib/steps/s3_helper.py rename to src/frostfs_testlib/steps/s3/s3_helper.py index c3092df..dbf48d3 100644 --- a/src/frostfs_testlib/steps/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -6,7 +6,8 @@ from typing import Optional from dateutil.parser import parse from frostfs_testlib import reporter -from frostfs_testlib.clients.s3 import BucketContainerResolver, S3ClientWrapper, VersioningStatus +from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus +from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.container import search_nodes_with_container from frostfs_testlib.storage.cluster import Cluster, ClusterNode @@ -184,26 +185,3 @@ def search_nodes_with_bucket( break nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) return nodes_list - - -def get_bytes_relative_to_object(value: int | str, object_size: int = None, part_size: int = None) -> int: - if isinstance(value, int): - return value - - if "part" not in value and "object" not in value: - return int(value) - - if object_size is not None: - value = value.replace("object", str(object_size)) - - if part_size is not None: - value = value.replace("part", str(part_size)) - - return int(eval(value)) - - -def get_range_relative_to_object(rng: str, object_size: int = None, part_size: int = None, int_values: bool = False) -> str | int: - start, end = rng.split(":") - start = get_bytes_relative_to_object(start, object_size, part_size) - end = get_bytes_relative_to_object(end, object_size, part_size) - return (start, end) if int_values else f"bytes {start}-{end}/*" diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index b67e34d..9fcc4c9 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -11,10 +11,10 @@ from frostfs_testlib.storage import get_service_registry from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode -from frostfs_testlib.storage.dataclasses.metrics import Metrics from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.service_registry import ServiceRegistry +from frostfs_testlib.storage.dataclasses.metrics import Metrics class ClusterNode: diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 2e49208..2cffd3a 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -5,7 +5,6 @@ class ConfigAttributes: WALLET_CONFIG = "wallet_config" CONFIG_DIR = "service_config_dir" CONFIG_PATH = "config_path" - WORKING_DIR = "working_dir" SHARD_CONFIG_PATH = "shard_config_path" LOGGER_CONFIG_PATH = "logger_config_path" LOCAL_WALLET_PATH = "local_wallet_path" @@ -16,7 +15,6 @@ class 
ConfigAttributes: ENDPOINT_DATA_0_NS = "endpoint_data0_namespace" ENDPOINT_INTERNAL = "endpoint_internal0" ENDPOINT_PROMETHEUS = "endpoint_prometheus" - ENDPOINT_PPROF = "endpoint_pprof" CONTROL_ENDPOINT = "control_endpoint" UN_LOCODE = "un_locode" @@ -25,6 +23,4 @@ class PlacementRule: DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" - REP_1_FOR_2_NODES_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 2 FROM * AS X" DEFAULT_EC_PLACEMENT_RULE = "EC 3.1" - EC_1_1_FOR_2_NODES_PLACEMENT_RULE = "EC 1.1 IN X CBF 1 SELECT 2 FROM * AS X" diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 51aaefb..53098b1 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,7 +1,6 @@ -import itertools +import datetime import logging import time -from datetime import datetime, timezone from typing import TypeVar import frostfs_testlib.resources.optionals as optionals @@ -19,7 +18,7 @@ from frostfs_testlib.steps.node_management import include_node_to_network_map, r from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeStatus +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeStatus from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import parallel from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success @@ -40,7 +39,7 @@ class ClusterStateController: def __init__(self, shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> None: self.stopped_nodes: list[ClusterNode] = [] self.detached_disks: dict[str, DiskController] = {} - self.dropped_traffic: set[ClusterNode] = set() + self.dropped_traffic: list[ClusterNode] = [] self.excluded_from_netmap: list[StorageNode] = [] self.stopped_services: set[NodeBase] = set() self.cluster = cluster @@ -173,15 +172,6 @@ class ClusterStateController: if service_type == StorageNode: self.wait_after_storage_startup() - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Send sighup to all {service_type} services") - def sighup_services_of_type(self, service_type: type[ServiceClass]): - services = self.cluster.services(service_type) - parallel([service.send_signal_to_service for service in services], signal="SIGHUP") - - if service_type == StorageNode: - self.wait_after_storage_startup() - @wait_for_success(600, 60) def wait_s3gate(self, s3gate: S3Gate): with reporter.step(f"Wait for {s3gate} reconnection"): @@ -216,27 +206,21 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Stop {service_type} service on {node}") - def stop_service_of_type(self, node: ClusterNode, service_type: ServiceClass, mask: bool = True): + def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True): service = node.service(service_type) service.stop_service(mask) self.stopped_services.add(service) - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - 
@reporter.step("Send sighup to {service_type} service on {node}") - def sighup_service_of_type(self, node: ClusterNode, service_type: ServiceClass): - service = node.service(service_type) - service.send_signal_to_service("SIGHUP") - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Start {service_type} service on {node}") - def start_service_of_type(self, node: ClusterNode, service_type: ServiceClass): + def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]): service = node.service(service_type) service.start_service() self.stopped_services.discard(service) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Start all stopped {service_type} services") - def start_stopped_services_of_type(self, service_type: ServiceClass): + def start_stopped_services_of_type(self, service_type: type[ServiceClass]): stopped_svc = self._get_stopped_by_type(service_type) if not stopped_svc: return @@ -247,20 +231,23 @@ class ClusterStateController: if service_type == StorageNode: self.wait_after_storage_startup() + # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Restart {service_type} service on {node}") - def restart_service_of_type(self, node: ClusterNode, service_type: ServiceClass): - service = node.service(service_type) - service.restart_service() + @reporter.step("Stop all storage services on cluster") + def stop_all_storage_services(self, reversed_order: bool = False): + nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + for node in nodes: + self.stop_service_of_type(node, StorageNode) + + # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Restart all {service_type} services") - def restart_services_of_type(self, service_type: type[ServiceClass]): - services = self.cluster.services(service_type) - parallel([service.restart_service for service in services]) + @reporter.step("Stop all S3 gates on cluster") + def stop_all_s3_gates(self, reversed_order: bool = False): + nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes - if service_type == StorageNode: - self.wait_after_storage_startup() + for node in nodes: + self.stop_service_of_type(node, S3Gate) # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @@ -274,6 +261,30 @@ class ClusterStateController: def start_storage_service(self, node: ClusterNode): self.start_service_of_type(node, StorageNode) + # TODO: Deprecated + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Start stopped storage services") + def start_stopped_storage_services(self): + self.start_stopped_services_of_type(StorageNode) + + # TODO: Deprecated + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Stop s3 gate on {node}") + def stop_s3_gate(self, node: ClusterNode, mask: bool = True): + self.stop_service_of_type(node, S3Gate, mask) + + # TODO: Deprecated + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Start s3 gate on {node}") + def start_s3_gate(self, node: ClusterNode): + self.start_service_of_type(node, S3Gate) + + # TODO: Deprecated + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Start stopped S3 gates") + def start_stopped_s3_gates(self): + self.start_stopped_services_of_type(S3Gate) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Suspend {process_name} service in {node}") def suspend_service(self, process_name: 
str, node: ClusterNode): @@ -299,22 +310,22 @@ class ClusterStateController: @reporter.step("Drop traffic to {node}, nodes - {block_nodes}") def drop_traffic(self, node: ClusterNode, wakeup_timeout: int, name_interface: str, block_nodes: list[ClusterNode] = None) -> None: - interfaces_tables = self._parse_interfaces(block_nodes, name_interface) - IpHelper.drop_input_traffic_to_node(node, interfaces_tables) + list_ip = self._parse_interfaces(block_nodes, name_interface) + IpHelper.drop_input_traffic_to_node(node, list_ip) time.sleep(wakeup_timeout) - self.dropped_traffic.add(node) + self.dropped_traffic.append(node) @reporter.step("Start traffic to {node}") def restore_traffic(self, node: ClusterNode) -> None: IpHelper.restore_input_traffic_to_node(node=node) - self.dropped_traffic.discard(node) + index = self.dropped_traffic.index(node) + self.dropped_traffic.pop(index) @reporter.step("Restore blocked nodes") def restore_all_traffic(self): if not self.dropped_traffic: return parallel(self._restore_traffic_to_node, self.dropped_traffic) - self.dropped_traffic.clear() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Hard reboot host {node} via magic SysRq option") @@ -363,33 +374,31 @@ class ClusterStateController: @reporter.step("Get node time") def get_node_date(self, node: ClusterNode) -> datetime: shell = node.host.get_shell() - return datetime.strptime(shell.exec('date +"%Y-%m-%d %H:%M:%S"').stdout.strip(), "%Y-%m-%d %H:%M:%S") + return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z") - @reporter.step("Set time on nodes in {in_date}") - def change_date_on_all_nodes(self, cluster: Cluster, in_date: datetime) -> None: - parallel(self.change_node_date, cluster.cluster_nodes, in_date=in_date) - - @reporter.step("Set time on {node} to {in_date}") + @reporter.step("Set node time to {in_date}") def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: shell = node.host.get_shell() - in_date_frmt = in_date.strftime("%Y-%m-%d %H:%M:%S") - shell.exec(f"timedatectl set-time '{in_date_frmt}'") + shell.exec(f"date -s @{time.mktime(in_date.timetuple())}") + shell.exec("hwclock --systohc") node_time = self.get_node_date(node) - with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): - assert (node_time - in_date).total_seconds() < 60 + assert (node_time - in_date) < datetime.timedelta(minutes=1) - @reporter.step("Restore time on nodes") - def restore_date_on_all_nodes(self, cluster: Cluster) -> None: - parallel(self.restore_node_date, cluster.cluster_nodes) - - @reporter.step("Restore time on {node}") + @reporter.step("Restore time") def restore_node_date(self, node: ClusterNode) -> None: shell = node.host.get_shell() - now_time = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S") - + now_time = datetime.datetime.now(datetime.timezone.utc) with reporter.step(f"Set {now_time} time"): - shell.exec(f"timedatectl set-time '{now_time}'") + shell.exec(f"date -s @{time.mktime(now_time.timetuple())}") + shell.exec("hwclock --systohc") + + @reporter.step("Change the synchronizer status to {status}") + def set_sync_date_all_nodes(self, status: str): + if status == "active": + parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes) + return + parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes) @reporter.step("Set MaintenanceModeAllowed - {status}") def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: @@
-429,11 +438,9 @@ class ClusterStateController: if not checker_node: checker_node = cluster_node netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(checker_node.storage_node.get_rpc_endpoint()).stdout) - netmap = [node for node in netmap if cluster_node.get_interface(Interfaces.MGMT) == node.node] + netmap = [node for node in netmap if cluster_node.host_ip == node.node] if status == NodeStatus.OFFLINE: - assert ( - cluster_node.get_interface(Interfaces.MGMT) not in netmap - ), f"{cluster_node.get_interface(Interfaces.MGMT)} not in Offline" + assert not netmap, f"{cluster_node.host_ip} is not Offline" else: assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'" @@ -475,6 +482,16 @@ class ClusterStateController: frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path) return frostfs_adm, frostfs_cli, frostfs_cli_remote + def _enable_date_synchronizer(self, cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + shell.exec("timedatectl set-ntp true") + cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 15) + + def _disable_date_synchronizer(self, cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + shell.exec("timedatectl set-ntp false") + cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 15) + def _get_disk_controller(self, node: StorageNode, device: str, mountpoint: str) -> DiskController: disk_controller_id = DiskController.get_id(node, device) if disk_controller_id in self.detached_disks.keys(): @@ -484,31 +501,17 @@ class ClusterStateController: return disk_controller - @reporter.step("Restore traffic {node}") def _restore_traffic_to_node(self, node): IpHelper.restore_input_traffic_to_node(node) - def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str) -> list[tuple]: - interfaces_and_tables = set() + def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str): + interfaces = [] for node in nodes: - shell = node.host.get_shell() - lines = shell.exec(f"ip r l table all | grep '{name_interface}'").stdout.splitlines() - - ips = [] - tables = [] - - for line in lines: - if "src" not in line or "table local" in line: - continue - parts = line.split() - ips.append(parts[-1]) - if "table" in line: - tables.append(parts[parts.index("table") + 1]) - tables.append(None) - - [interfaces_and_tables.add((ip, table)) for ip, table in itertools.product(ips, tables)] - - return interfaces_and_tables + dict_interfaces = node.host.config.interfaces + for type, ip in dict_interfaces.items(): + if name_interface in type: + interfaces.append(ip) + return interfaces @reporter.step("Ping node") def _ping_host(self, node: ClusterNode): diff --git a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py index f0b2a21..66f72d6 100644 --- a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py +++ b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py @@ -14,19 +14,14 @@ class ConfigStateManager(StateManager): self.cluster = self.csc.cluster @reporter.step("Change configuration for {service_type} on all nodes") - def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any], sighup: bool = False): + def set_on_all_nodes(self, service_type: type[ServiceClass],
values: dict[str, Any]): services = self.cluster.services(service_type) nodes = self.cluster.nodes(services) self.services_with_changed_config.update([(node, service_type) for node in nodes]) - if not sighup: - self.csc.stop_services_of_type(service_type) - + self.csc.stop_services_of_type(service_type) parallel([node.config(service_type).set for node in nodes], values=values) - if not sighup: - self.csc.start_services_of_type(service_type) - else: - self.csc.sighup_services_of_type(service_type) + self.csc.start_services_of_type(service_type) @reporter.step("Change configuration for {service_type} on {node}") def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]): @@ -37,26 +32,18 @@ class ConfigStateManager(StateManager): self.csc.start_service_of_type(node, service_type) @reporter.step("Revert all configuration changes") - def revert_all(self, sighup: bool = False): + def revert_all(self): if not self.services_with_changed_config: return - parallel(self._revert_svc, self.services_with_changed_config, sighup) + parallel(self._revert_svc, self.services_with_changed_config) self.services_with_changed_config.clear() - if not sighup: - self.csc.start_all_stopped_services() + self.csc.start_all_stopped_services() # TODO: parallel can't have multiple parallel_items :( @reporter.step("Revert all configuration {node_and_service}") - def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass], sighup: bool = False): + def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]): node, service_type = node_and_service - service = node.service(service_type) - - if not sighup: - self.csc.stop_service_of_type(node, service_type) - + self.csc.stop_service_of_type(node, service_type) node.config(service_type).revert() - - if sighup: - service.send_signal_to_service("SIGHUP") diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py index 1199435..f0f1758 100644 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -13,7 +13,6 @@ FROSTFS_CONTRACT_CACHE_TIMEOUT = 30 class ObjectOperations(HumanReadableEnum): PUT = "object.put" - PATCH = "object.patch" GET = "object.get" HEAD = "object.head" GET_RANGE = "object.range" @@ -27,18 +26,6 @@ class ObjectOperations(HumanReadableEnum): return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] -class ContainerOperations(HumanReadableEnum): - PUT = "container.put" - GET = "container.get" - LIST = "container.list" - DELETE = "container.delete" - WILDCARD_ALL = "container.*" - - @staticmethod - def get_all(): - return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] - - @dataclass class Operations: GET_CONTAINER = "GetContainer" @@ -52,7 +39,6 @@ class Operations: SEARCH_OBJECT = "SearchObject" HEAD_OBJECT = "HeadObject" PUT_OBJECT = "PutObject" - PATCH_OBJECT = "PatchObject" class Verb(HumanReadableEnum): @@ -138,7 +124,7 @@ class Rule: if not operations: self.operations = [] - elif isinstance(operations, (ObjectOperations, ContainerOperations)): + elif isinstance(operations, ObjectOperations): self.operations = [operations] else: self.operations = operations diff --git a/src/frostfs_testlib/storage/dataclasses/metrics.py b/src/frostfs_testlib/storage/dataclasses/metrics.py index 8969015..81e757c 100644 --- a/src/frostfs_testlib/storage/dataclasses/metrics.py +++ b/src/frostfs_testlib/storage/dataclasses/metrics.py @@ -1,9 +1,3 @@ -import time -from 
functools import wraps -from typing import Callable - -import pytest - from frostfs_testlib.hosting import Host from frostfs_testlib.shell.interfaces import CommandResult @@ -13,11 +7,11 @@ class Metrics: self.storage = StorageMetrics(host, metrics_endpoint) + class StorageMetrics: """ Class represents storage metrics in a cluster """ - def __init__(self, host: Host, metrics_endpoint: str) -> None: self.host = host self.metrics_endpoint = metrics_endpoint @@ -35,46 +29,8 @@ class StorageMetrics: additional_greps = " |grep ".join([grep_command for grep_command in greps.values()]) result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}") return result - + def get_all_metrics(self) -> CommandResult: shell = self.host.get_shell() result = shell.exec(f"curl -s {self.metrics_endpoint}") return result - - -def wait_until_metric_result_is_stable( - relative_deviation: float = None, absolute_deviation: int = None, max_attempts: int = 10, sleep_interval: int = 30 -): - """ - A decorator function that repeatedly calls the decorated function until its result stabilizes - within a specified relative tolerance or until the maximum number of attempts is reached. - - This decorator is useful for scenarios where a function returns a metric or value that may fluctuate - over time, and you want to ensure that the result has stabilized before proceeding. - """ - - def decorator(func: Callable): - @wraps(func) - def wrapper(*args, **kwargs): - last_result = None - for _ in range(max_attempts): - # first function call - first_result = func(*args, **kwargs) - - # waiting before the second call - time.sleep(sleep_interval) - - # second function call - last_result = func(*args, **kwargs) - - # checking value stability - if first_result == pytest.approx(last_result, rel=relative_deviation, abs=absolute_deviation): - return last_result - - # if stability is not achieved, return the last value - if last_result is not None: - return last_result - - return wrapper - - return decorator diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 5c8b723..8291345 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -65,10 +65,6 @@ class NodeBase(HumanReadableABC): with reporter.step(f"Start {self.name} service on {self.host.config.address}"): self.host.start_service(self.name) - def send_signal_to_service(self, signal: str): - with reporter.step(f"Send -{signal} signal to {self.name} service on {self.host.config.address}"): - self.host.send_signal_to_service(self.name, signal) - @abstractmethod def service_healthcheck(self) -> bool: """Service healthcheck.""" @@ -82,9 +78,6 @@ class NodeBase(HumanReadableABC): def get_metrics_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS) - def get_pprof_endpoint(self) -> str: - return self._get_attribute(ConfigAttributes.ENDPOINT_PPROF) - def stop_service(self, mask: bool = True): if mask: with reporter.step(f"Mask {self.name} service on {self.host.config.address}"): @@ -147,13 +140,6 @@ class NodeBase(HumanReadableABC): else None ) - def get_working_dir_path(self) -> Optional[str]: - """ - Returns working directory path located on remote host - """ - config_attributes = self.host.get_service_config(self.name) - return self._get_attribute(ConfigAttributes.WORKING_DIR) if ConfigAttributes.WORKING_DIR in config_attributes.attributes else None - @property def config_dir(self) -> str: 
return self._get_attribute(ConfigAttributes.CONFIG_DIR) @@ -199,7 +185,9 @@ class NodeBase(HumanReadableABC): if attribute_name not in config.attributes: if default_attribute_name is None: - raise RuntimeError(f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either") + raise RuntimeError( + f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either" + ) return config.attributes[default_attribute_name] @@ -209,7 +197,9 @@ class NodeBase(HumanReadableABC): return self.host.get_service_config(self.name) def get_service_uptime(self, service: str) -> datetime: - result = self.host.get_shell().exec(f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2") + result = self.host.get_shell().exec( + f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2" + ) start_time = parser.parse(result.stdout.strip()) current_time = datetime.now(tz=timezone.utc) active_time = current_time - start_time diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index 4c303fc..55a8388 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -1,9 +1,6 @@ -import re from dataclasses import dataclass from typing import Optional -from pydantic import BaseModel, Field, field_validator - from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.readable import HumanReadableEnum @@ -78,37 +75,8 @@ class NodeNetInfo: withdrawal_fee: str = None homomorphic_hashing_disabled: str = None maintenance_mode_allowed: str = None - - -class Attributes(BaseModel): - cluster_name: str = Field(alias="ClusterName") - continent: str = Field(alias="Continent") - country: str = Field(alias="Country") - country_code: str = Field(alias="CountryCode") - external_addr: list[str] = Field(alias="ExternalAddr") - location: str = Field(alias="Location") - node: str = Field(alias="Node") - subdiv: str = Field(alias="SubDiv") - subdiv_code: str = Field(alias="SubDivCode") - un_locode: str = Field(alias="UN-LOCODE") - role: str = Field(alias="role") - - @field_validator("external_addr", mode="before") - @classmethod - def convert_external_addr(cls, value: str) -> list[str]: - return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", value)] - - -class NodeInfo(BaseModel): - public_key: str = Field(alias="publicKey") - addresses: list[str] = Field(alias="addresses") - state: str = Field(alias="state") - attributes: Attributes = Field(alias="attributes") - - @field_validator("addresses", mode="before") - @classmethod - def convert_external_addr(cls, value: str) -> list[str]: - return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", ",".join(value))] + eigen_trust_alpha: str = None + eigen_trust_iterations: str = None @dataclass diff --git a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py index d9f94b2..8cef23b 100644 --- a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py +++ b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py @@ -1,15 +1,14 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.storage.grpc_operations import implementations, interfaces, interfaces_wrapper +from frostfs_testlib.storage.grpc_operations import interfaces 
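As an aside to the get_service_uptime reformat above, a standalone sketch of the same computation (assumptions: a systemd host, python-dateutil installed, and a placeholder unit name):

import subprocess
from datetime import datetime, timezone

from dateutil import parser

def service_uptime(service: str):
    # systemd prints "ActiveEnterTimestamp=<timestamp>"; `cut` keeps only the value
    cmd = f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2"
    stdout = subprocess.run(cmd, shell=True, capture_output=True, text=True).stdout
    start_time = parser.parse(stdout.strip())
    return datetime.now(tz=timezone.utc) - start_time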
+from frostfs_testlib.storage.grpc_operations.implementations import container, object -class CliClientWrapper(interfaces_wrapper.GrpcClientWrapper): +class CliClientWrapper(interfaces.GrpcClientWrapper): def __init__(self, cli: FrostfsCli) -> None: self.cli = cli - self.object: interfaces.ObjectInterface = implementations.ObjectOperations(self.cli) - self.container: interfaces.ContainerInterface = implementations.ContainerOperations(self.cli) - self.netmap: interfaces.NetmapInterface = implementations.NetmapOperations(self.cli) - self.ape_manager: interfaces.ApeManagerInterface = implementations.ApeManagerOperations(self.cli) + self.object: interfaces.ObjectInterface = object.ObjectOperations(self.cli) + self.container: interfaces.ContainerInterface = container.ContainerOperations(self.cli) -class RpcClientWrapper(interfaces_wrapper.GrpcClientWrapper): +class RpcClientWrapper(interfaces.GrpcClientWrapper): pass # The next series diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py index df820fa..e69de29 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py @@ -1,5 +0,0 @@ -from .ape_manager import ApeManagerOperations -from .chunks import ChunksOperations -from .container import ContainerOperations -from .netmap import NetmapOperations -from .object import ObjectOperations diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/ape_manager.py b/src/frostfs_testlib/storage/grpc_operations/implementations/ape_manager.py deleted file mode 100644 index 070d8a6..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/ape_manager.py +++ /dev/null @@ -1,79 +0,0 @@ -from typing import Optional - -from frostfs_testlib import reporter -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT - - -class ApeManagerOperations: - def __init__(self, cli: FrostfsCli): - self.cli = cli - - @reporter.step("Add ape rule") - def add( - self, - rpc_endpoint: str, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - path: Optional[str] = None, - rule: Optional[str] | Optional[list[str]] = None, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ): - return self.cli.ape_manager.add( - rpc_endpoint=rpc_endpoint, - chain_id=chain_id, - chain_id_hex=chain_id_hex, - path=path, - rule=rule, - target_name=target_name, - target_type=target_type, - wallet=wallet, - address=address, - timeout=timeout, - ) - - @reporter.step("Get list APE rules") - def list( - self, - rpc_endpoint: str, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ): - return self.cli.ape_manager.list( - rpc_endpoint=rpc_endpoint, - target_name=target_name, - target_type=target_type, - wallet=wallet, - address=address, - timeout=timeout, - ) - - @reporter.step("Remove APE rule") - def remove( - self, - rpc_endpoint: str, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: 
Optional[str] = CLI_DEFAULT_TIMEOUT, - ): - return self.cli.ape_manager.remove( - rpc_endpoint=rpc_endpoint, - chain_id=chain_id, - chain_id_hex=chain_id_hex, - target_name=target_name, - target_type=target_type, - wallet=wallet, - address=address, - timeout=timeout, - ) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py index 0d787e2..7f3161c 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -6,7 +6,7 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher -from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, Interfaces, NodeNetmapInfo +from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo from frostfs_testlib.storage.grpc_operations import interfaces from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils.cli_utils import parse_netmap_output @@ -30,7 +30,7 @@ class ChunksOperations(interfaces.ChunksInterface): result = [] for node_info in netmap: for cluster_node in cluster.cluster_nodes: - if node_info.node == cluster_node.get_interface(Interfaces.MGMT): + if node_info.node == cluster_node.host_ip: result.append(cluster_node) return result @@ -40,7 +40,7 @@ class ChunksOperations(interfaces.ChunksInterface): for node_info in netmap: if node_info.node_id in chunk.confirmed_nodes: for cluster_node in cluster.cluster_nodes: - if cluster_node.get_interface(Interfaces.MGMT) == node_info.node: + if cluster_node.host_ip == node_info.node: return (cluster_node, node_info) @wait_for_success(300, 5, fail_testcase=None) @@ -161,5 +161,5 @@ class ChunksOperations(interfaces.ChunksInterface): def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]: parse_result = json.loads(object_nodes) if parse_result.get("errors"): - raise RuntimeError(", ".join(parse_result["errors"])) + raise RuntimeError(parse_result["errors"]) return [Chunk(**chunk) for chunk in parse_result["data_objects"]] diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py index afdf6cb..7a637d7 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py @@ -1,14 +1,13 @@ import json import logging import re -from time import sleep from typing import List, Optional, Union from frostfs_testlib import reporter from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.clients.s3 import BucketContainerResolver from frostfs_testlib.plugins import load_plugin from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.grpc_operations import interfaces from frostfs_testlib.utils import json_utils @@ -182,17 +181,20 @@ class ContainerOperations(interfaces.ContainerInterface): force: bool = False, trace: bool = False, ): - return self.cli.container.delete( - rpc_endpoint=endpoint, - cid=cid, - address=address, - await_mode=await_mode, -
session=session, - ttl=ttl, - xhdr=xhdr, - force=force, - trace=trace, - ).stdout + try: + return self.cli.container.delete( + rpc_endpoint=endpoint, + cid=cid, + address=address, + await_mode=await_mode, + session=session, + ttl=ttl, + xhdr=xhdr, + force=force, + trace=trace, + ).stdout + except RuntimeError as e: + logger.error(f"Failed to delete container:\n{e}") @reporter.step("Get container") def get( @@ -302,16 +304,6 @@ class ContainerOperations(interfaces.ContainerInterface): resolver: BucketContainerResolver = resolver_cls() return resolver.resolve(node, name) - @reporter.step("Wait create container, with list") - def wait_creation(self, cid: str, endpoint: str, attempts: int = 15, sleep_interval: int = 1): - for _ in range(attempts): - containers = self.list(endpoint) - if cid in containers: - return - logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue") - sleep(sleep_interval) - raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting") - def _parse_cid(self, output: str) -> str: """ Parses container ID from a given CLI output. The input string we expect: diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py deleted file mode 100644 index 76ee69a..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py +++ /dev/null @@ -1,171 +0,0 @@ -import json as module_json -from typing import List, Optional - -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.cli.netmap_parser import NetmapParser -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeInfo, NodeNetInfo, NodeNetmapInfo - -from .. import interfaces - - -class NetmapOperations(interfaces.NetmapInterface): - def __init__(self, cli: FrostfsCli) -> None: - self.cli = cli - - def epoch( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> int: - """ - Get current epoch number. - """ - output = ( - self.cli.netmap.epoch( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return int(output) - - def netinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> NodeNetInfo: - """ - Get target node info. - """ - output = ( - self.cli.netmap.netinfo( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return NetmapParser.netinfo(output) - - def nodeinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - json: bool = True, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> NodeInfo: - """ - Get target node info.
- """ - output = ( - self.cli.netmap.nodeinfo( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - json=json, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return NetmapParser.node_info(module_json.loads(output)) - - def snapshot( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> List[NodeNetmapInfo]: - """ - Get target node info. - """ - output = ( - self.cli.netmap.snapshot( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return NetmapParser.snapshot_all_nodes(output) - - def snapshot_one_node( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> List[NodeNetmapInfo]: - """ - Get target one node info. - """ - output = ( - self.cli.netmap.snapshot( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return NetmapParser.snapshot_one_node(output, rpc_endpoint) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py index be8a470..0e14aec 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py @@ -11,7 +11,6 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell.interfaces import CommandResult from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.grpc_operations import interfaces from frostfs_testlib.storage.grpc_operations.implementations.chunks import ChunksOperations from frostfs_testlib.testing.test_control import wait_for_success @@ -207,11 +206,6 @@ class ObjectOperations(interfaces.ObjectInterface): hash_type=hash_type, timeout=timeout, ) - - if range: - # Cut off the range and return only hash - return result.stdout.split(":")[1].strip() - return result.stdout @reporter.step("Head object") @@ -413,57 +407,6 @@ class ObjectOperations(interfaces.ObjectInterface): oid = id_str.split(":")[1] return oid.strip() - @reporter.step("Patch object") - def patch( - self, - cid: str, - oid: str, - endpoint: str, - ranges: list[str] = None, - payloads: list[str] = None, - new_attrs: Optional[str] = None, - replace_attrs: bool = False, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - trace: bool = False, - ) -> str: - """ - PATCH an object. 
- - Args: - cid: ID of Container where we get the Object from - oid: Object ID - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - ranges: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2] - payloads: An array of file paths to be applied in each range - new_attrs: Attributes to be changed in the format "key1=value1,key2=value2" - replace_attrs: Replace all attributes completely with new ones specified in new_attrs - bearer: Path to Bearer Token file, appends to `--bearer` key - xhdr: Request X-Headers in form of Key=Value - session: Path to a JSON-encoded container session token - timeout: Timeout for the operation - trace: Generate trace ID and print it - Returns: - (str): ID of patched Object - """ - result = self.cli.object.patch( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - range=ranges, - payload=payloads, - new_attrs=new_attrs, - replace_attrs=replace_attrs, - bearer=bearer, - xhdr=xhdr, - session=session, - timeout=timeout, - trace=trace, - ) - return result.stdout.split(":")[1].strip() - @reporter.step("Put object to random node") def put_to_random_node( self, @@ -675,34 +618,7 @@ class ObjectOperations(interfaces.ObjectInterface): cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes - if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT) + if netmap_node.node == cluster_node.host_ip ] return object_nodes - - @reporter.step("Search parts of object") - def parts( - self, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> list[str]: - endpoint = alive_node.storage_node.get_rpc_endpoint() - response = self.cli.object.nodes( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - bearer=bearer, - ttl=1 if is_direct else None, - json=True, - xhdr=xhdr, - timeout=timeout, - verify_presence_all=verify_presence_all, - ) - response_json = json.loads(response.stdout) - return [data_object["object_id"] for data_object in response_json["data_objects"]] diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces.py b/src/frostfs_testlib/storage/grpc_operations/interfaces.py new file mode 100644 index 0000000..c293c2d --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces.py @@ -0,0 +1,392 @@ +from abc import ABC, abstractmethod +from typing import Any, List, Optional + +from frostfs_testlib.shell.interfaces import CommandResult +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.constants import PlacementRule +from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo +from frostfs_testlib.utils import file_utils + + +class ChunksInterface(ABC): + @abstractmethod + def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: + pass + + @abstractmethod + def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: + pass + + @abstractmethod + def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: + pass + + @abstractmethod + def get_all( + self, + rpc_endpoint: str, + cid: str, + oid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + 
json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> list[Chunk]: + pass + + @abstractmethod + def get_parity( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + pass + + @abstractmethod + def get_first_data( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + pass + + +class ObjectInterface(ABC): + def __init__(self) -> None: + self.chunks: ChunksInterface + + @abstractmethod + def delete( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def get( + self, + cid: str, + oid: str, + endpoint: str, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> file_utils.TestFile: + pass + + @abstractmethod + def get_from_random_node( + self, + cid: str, + oid: str, + cluster: Cluster, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def hash( + self, + endpoint: str, + cid: str, + oid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + range: Optional[str] = None, + salt: Optional[str] = None, + ttl: Optional[int] = None, + session: Optional[str] = None, + hash_type: Optional[str] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def head( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + json_output: bool = True, + is_raw: bool = False, + is_direct: bool = False, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult | Any: + pass + + @abstractmethod + def lock( + self, + cid: str, + oid: str, + endpoint: str, + lifetime: Optional[int] = None, + expire_at: Optional[int] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def put( + self, + path: str, + cid: str, + endpoint: str, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def put_to_random_node( + self, + path: str, + cid: str, + 
cluster: Cluster, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def range( + self, + cid: str, + oid: str, + range_cut: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> tuple[file_utils.TestFile, bytes]: + pass + + @abstractmethod + def search( + self, + cid: str, + endpoint: str, + bearer: str = "", + oid: Optional[str] = None, + filters: Optional[dict] = None, + expected_objects_list: Optional[list] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + phy: bool = False, + root: bool = False, + timeout: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + ttl: Optional[int] = None, + ) -> List: + pass + + @abstractmethod + def nodes( + self, + cluster: Cluster, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = None, + ) -> List[ClusterNode]: + pass + + +class ContainerInterface(ABC): + @abstractmethod + def create( + self, + endpoint: str, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, + address: Optional[str] = None, + attributes: Optional[dict] = None, + basic_acl: Optional[str] = None, + await_mode: bool = False, + disable_timestamp: bool = False, + force: bool = False, + trace: bool = False, + name: Optional[str] = None, + nonce: Optional[str] = None, + policy: Optional[str] = None, + session: Optional[str] = None, + subnet: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> str: + """ + Create a new container and register it in the FrostFS. + It will be stored in the sidechain when the Inner Ring accepts it. + """ + raise NotImplementedError("Method create is not implemented") + + @abstractmethod + def delete( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + await_mode: bool = False, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + force: bool = False, + trace: bool = False, + ) -> List[str]: + """ + Delete an existing container. + Only the owner of the container has permission to remove the container.
+ """ + raise NotImplementedError("No implemethed method delete") + + @abstractmethod + def get( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + to: Optional[str] = None, + json_mode: bool = True, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get container field info.""" + raise NotImplementedError("No implemethed method get") + + @abstractmethod + def get_eacl( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + json_mode: bool = True, + trace: bool = False, + to: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get extended ACL table of container.""" + raise NotImplementedError("No implemethed method get-eacl") + + @abstractmethod + def list( + self, + endpoint: str, + name: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + owner: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + **params, + ) -> List[str]: + """List all created containers.""" + raise NotImplementedError("No implemethed method list") + + @abstractmethod + def nodes( + self, + endpoint: str, + cid: str, + cluster: Cluster, + address: Optional[str] = None, + ttl: Optional[int] = None, + from_file: Optional[str] = None, + trace: bool = False, + short: Optional[bool] = True, + xhdr: Optional[dict] = None, + generate_key: Optional[bool] = None, + timeout: Optional[str] = None, + ) -> List[ClusterNode]: + """Show the nodes participating in the container in the current epoch.""" + raise NotImplementedError("No implemethed method nodes") + + +class GrpcClientWrapper(ABC): + def __init__(self) -> None: + self.object: ObjectInterface + self.container: ContainerInterface diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py deleted file mode 100644 index 379bbe0..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .ape_manager import ApeManagerInterface -from .chunks import ChunksInterface -from .container import ContainerInterface -from .netmap import NetmapInterface -from .object import ObjectInterface diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/ape_manager.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/ape_manager.py deleted file mode 100644 index 5b198bc..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/ape_manager.py +++ /dev/null @@ -1,48 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Optional - -from frostfs_testlib.shell.interfaces import CommandResult - - -class ApeManagerInterface(ABC): - @abstractmethod - def add( - self, - rpc_endpoint: str, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - path: Optional[str] = None, - rule: Optional[str] | Optional[list[str]] = None, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - pass - - @abstractmethod - def list( - self, - rpc_endpoint: str, - target_name: 
Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - pass - - @abstractmethod - def remove( - self, - rpc_endpoint: str, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py deleted file mode 100644 index 986b938..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py +++ /dev/null @@ -1,79 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Optional - -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo - - -class ChunksInterface(ABC): - @abstractmethod - def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: - pass - - @abstractmethod - def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: - pass - - @abstractmethod - def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: - pass - - @abstractmethod - def get_all( - self, - rpc_endpoint: str, - cid: str, - oid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> list[Chunk]: - pass - - @abstractmethod - def get_parity( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> Chunk: - pass - - @abstractmethod - def get_first_data( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> Chunk: - pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py deleted file mode 100644 index 397f7b2..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py +++ /dev/null @@ -1,129 +0,0 @@ -from abc import ABC, abstractmethod -from typing import List, Optional - -from frostfs_testlib.storage.cluster import Cluster, ClusterNode - - -class ContainerInterface(ABC): - @abstractmethod - def create( - self, - endpoint: str, - nns_zone: Optional[str] = None, - nns_name: Optional[str] = None, - address: Optional[str] = None, - attributes: Optional[dict] = None, - basic_acl: Optional[str] = None, - await_mode: bool = False, 
- disable_timestamp: bool = False, - force: bool = False, - trace: bool = False, - name: Optional[str] = None, - nonce: Optional[str] = None, - policy: Optional[str] = None, - session: Optional[str] = None, - subnet: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - """ - Create a new container and register it in the FrostFS. - It will be stored in the sidechain when the Inner Ring accepts it. - """ - raise NotImplementedError("No implemethed method create") - - @abstractmethod - def delete( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - await_mode: bool = False, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - force: bool = False, - trace: bool = False, - ) -> List[str]: - """ - Delete an existing container. - Only the owner of the container has permission to remove the container. - """ - raise NotImplementedError("No implemethed method delete") - - @abstractmethod - def get( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - to: Optional[str] = None, - json_mode: bool = True, - trace: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[str]: - """Get container field info.""" - raise NotImplementedError("No implemethed method get") - - @abstractmethod - def get_eacl( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - json_mode: bool = True, - trace: bool = False, - to: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[str]: - """Get extended ACL table of container.""" - raise NotImplementedError("No implemethed method get-eacl") - - @abstractmethod - def list( - self, - endpoint: str, - name: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - owner: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - **params, - ) -> List[str]: - """List all created containers.""" - raise NotImplementedError("No implemethed method list") - - @abstractmethod - def nodes( - self, - endpoint: str, - cid: str, - cluster: Cluster, - address: Optional[str] = None, - ttl: Optional[int] = None, - from_file: Optional[str] = None, - trace: bool = False, - short: Optional[bool] = True, - xhdr: Optional[dict] = None, - generate_key: Optional[bool] = None, - timeout: Optional[str] = None, - ) -> List[ClusterNode]: - """Show the nodes participating in the container in the current epoch.""" - raise NotImplementedError("No implemethed method nodes") - - @abstractmethod - def wait_creation(self, cid: str, endpoint: str, attempts: Optional[str], sleep_interval: Optional[int]) -> None: - raise NotImplementedError("No implemented method wait_creation") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py deleted file mode 100644 index 3fdc98a..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py +++ /dev/null @@ -1,89 +0,0 @@ -from abc import ABC, abstractmethod -from typing import List, Optional - -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeInfo, 
NodeNetInfo, NodeNetmapInfo - - -class NetmapInterface(ABC): - @abstractmethod - def epoch( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = False, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> int: - """ - Get current epoch number. - """ - raise NotImplementedError("No implemethed method epoch") - - @abstractmethod - def netinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> NodeNetInfo: - """ - Get target node info. - """ - raise NotImplementedError("No implemethed method netinfo") - - @abstractmethod - def nodeinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> NodeInfo: - """ - Get target node info. - """ - raise NotImplementedError("No implemethed method nodeinfo") - - @abstractmethod - def snapshot( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[NodeNetmapInfo]: - """ - Get target node info. - """ - raise NotImplementedError("No implemethed method snapshot") - - @abstractmethod - def snapshot_one_node( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[NodeNetmapInfo]: - """ - Get target one node info. 
- """ - raise NotImplementedError("No implemethed method snapshot") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py deleted file mode 100644 index 550c461..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py +++ /dev/null @@ -1,223 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any, List, Optional - -from frostfs_testlib.shell.interfaces import CommandResult -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.utils import file_utils - -from .chunks import ChunksInterface - - -class ObjectInterface(ABC): - def __init__(self) -> None: - self.chunks: ChunksInterface - - @abstractmethod - def delete( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def get( - self, - cid: str, - oid: str, - endpoint: str, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> file_utils.TestFile: - pass - - @abstractmethod - def get_from_random_node( - self, - cid: str, - oid: str, - cluster: Cluster, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def hash( - self, - endpoint: str, - cid: str, - oid: str, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - range: Optional[str] = None, - salt: Optional[str] = None, - ttl: Optional[int] = None, - session: Optional[str] = None, - hash_type: Optional[str] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def head( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - json_output: bool = True, - is_raw: bool = False, - is_direct: bool = False, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult | Any: - pass - - @abstractmethod - def lock( - self, - cid: str, - oid: str, - endpoint: str, - lifetime: Optional[int] = None, - expire_at: Optional[int] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def put( - self, - path: str, - cid: str, - endpoint: str, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def patch( - self, - cid: str, - oid: str, - endpoint: str, - ranges: Optional[list[str]] = None, - payloads: Optional[list[str]] = None, - new_attrs: Optional[str] = None, - replace_attrs: bool = False, - bearer: Optional[str] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - trace: bool = False, - ) -> str: - pass - - @abstractmethod - def put_to_random_node( - self, - path: str, - cid: str, - cluster: Cluster, - 
bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def range( - self, - cid: str, - oid: str, - range_cut: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> tuple[file_utils.TestFile, bytes]: - pass - - @abstractmethod - def search( - self, - cid: str, - endpoint: str, - bearer: str = "", - oid: Optional[str] = None, - filters: Optional[dict] = None, - expected_objects_list: Optional[list] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - phy: bool = False, - root: bool = False, - timeout: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - ttl: Optional[int] = None, - ) -> List: - pass - - @abstractmethod - def nodes( - self, - cluster: Cluster, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = None, - ) -> List[ClusterNode]: - pass - - @abstractmethod - def parts( - self, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = None, - ) -> List[str]: - pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py b/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py deleted file mode 100644 index 5edc99f..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py +++ /dev/null @@ -1,14 +0,0 @@ -from abc import ABC - -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli - -from . import interfaces - - -class GrpcClientWrapper(ABC): - def __init__(self) -> None: - self.cli: FrostfsCli - self.object: interfaces.ObjectInterface - self.container: interfaces.ContainerInterface - self.netmap: interfaces.NetmapInterface - self.ape_manager: interfaces.ApeManagerInterface diff --git a/src/frostfs_testlib/testing/test_control.py b/src/frostfs_testlib/testing/test_control.py index bc38208..4fa6390 100644 --- a/src/frostfs_testlib/testing/test_control.py +++ b/src/frostfs_testlib/testing/test_control.py @@ -1,16 +1,13 @@ import inspect import logging -import os from functools import wraps from time import sleep, time from typing import Any -import yaml from _pytest.outcomes import Failed from pytest import fail from frostfs_testlib import reporter -from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.utils.func_utils import format_by_args logger = logging.getLogger("NeoLogger") @@ -131,42 +128,6 @@ def run_optionally(enabled: bool, mock_value: Any = True): return deco -def cached_fixture(enabled: bool): - """ - Decorator to cache fixtures. - MUST be placed after @pytest.fixture and before @allure decorators. - - Args: - enabled: if true, decorated func will be cached. 
- """ - - def deco(func): - @wraps(func) - def func_impl(*a, **kw): - # TODO: *a and *kw should be parsed to some kind of hashsum and used in filename to prevent cache load from different parameters - cache_file = os.path.join(ASSETS_DIR, f"fixture_cache_{func.__name__}.yml") - - if enabled and os.path.exists(cache_file): - with open(cache_file, "r") as cache_input: - return yaml.load(cache_input, Loader=yaml.Loader) - - result = func(*a, **kw) - - if enabled: - with open(cache_file, "w") as cache_output: - yaml.dump(result, cache_output) - return result - - # TODO: cache yielding fixtures - @wraps(func) - def gen_impl(*a, **kw): - raise NotImplementedError("Not implemented for yielding fixtures") - - return gen_impl if inspect.isgeneratorfunction(func) else func_impl - - return deco - - def wait_for_success( max_wait_time: int = 60, interval: int = 1, diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 8787296..32e4346 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -68,7 +68,7 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n" f"RC: {return_code}\n" - f"Start / End / Elapsed\t {start_time} / {end_time} / {end_time - start_time}" + f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {end_time - start_time}" ) with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): reporter.attach(command_attachment, "Command execution") @@ -80,9 +80,6 @@ def log_command_execution(cmd: str, output: Union[str, dict], params: Optional[d if not params: params = {} - if params.get("Body") and len(params.get("Body")) > 1000: - params["Body"] = "" - output_params = params try: diff --git a/src/frostfs_testlib/utils/file_utils.py b/src/frostfs_testlib/utils/file_utils.py index 8839d7f..c2b497f 100644 --- a/src/frostfs_testlib/utils/file_utils.py +++ b/src/frostfs_testlib/utils/file_utils.py @@ -45,7 +45,7 @@ def ensure_directory_opener(path, flags): # TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps # Use object_size dt in future as argument @reporter.step("Generate file") -def generate_file(size: int, file_name: Optional[str] = None) -> TestFile: +def generate_file(size: int) -> TestFile: """Generates a binary file with the specified size in bytes. Args: @@ -54,11 +54,7 @@ def generate_file(size: int, file_name: Optional[str] = None) -> TestFile: Returns: The path to the generated file. 
""" - - if file_name is None: - file_name = string_utils.unique_name("object-") - - test_file = TestFile(os.path.join(ASSETS_DIR, file_name)) + test_file = TestFile(os.path.join(ASSETS_DIR, string_utils.unique_name("object-"))) with open(test_file, "wb", opener=ensure_directory_opener) as file: file.write(os.urandom(size)) logger.info(f"File with size {size} bytes has been generated: {test_file}") diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index 0676085..490abb0 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -64,7 +64,7 @@ def parallel_binary_verions(host: Host) -> dict[str, str]: try: result = shell.exec(f"{binary_path} {binary['param']}") version = parse_version(result.stdout) or parse_version(result.stderr) or "Unknown" - versions_at_host[binary_name] = version.strip() + versions_at_host[binary_name] = version except Exception as exc: logger.error(f"Cannot get version for {binary_path} because of\n{exc}") versions_at_host[binary_name] = "Unknown" diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py index 677aed4..19f3832 100644 --- a/tests/test_dataclasses.py +++ b/tests/test_dataclasses.py @@ -2,7 +2,7 @@ from typing import Any import pytest -from frostfs_testlib.clients import AwsCliClient, Boto3ClientWrapper +from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper from frostfs_testlib.storage.dataclasses.acl import EACLRole from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.object_size import ObjectSize diff --git a/tests/test_load_config.py b/tests/test_load_config.py index fbeb587..883b1f2 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -6,7 +6,10 @@ import pytest from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType, Preset, ReadFrom from frostfs_testlib.load.runners import DefaultRunner from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME +from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController +from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode +from frostfs_testlib.storage.dataclasses.node_base import NodeBase @dataclass @@ -126,8 +129,6 @@ class TestLoadConfig: "--size '11'", "--acl 'acl'", "--preload_obj '13'", - "--retry '24'", - "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -160,8 +161,6 @@ class TestLoadConfig: expected_preset_args = [ "--size '11'", "--preload_obj '13'", - "--retry '24'", - "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -318,8 +317,6 @@ class TestLoadConfig: "--no-verify-ssl", "--size '11'", "--preload_obj '13'", - "--retry '24'", - "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -353,8 +350,6 @@ class TestLoadConfig: expected_preset_args = [ "--size '11'", "--preload_obj '13'", - "--retry '24'", - "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -420,26 +415,6 @@ class TestLoadConfig: self._check_preset_params(load_params, params) - @pytest.mark.parametrize( - "load_type, input, value, params", - [ - (LoadType.gRPC, ["A C ", " B"], ["A C", "B"], [f"--rule 'A C' --rule 'B'"]), - 
(LoadType.gRPC, " A ", ["A"], ["--rule 'A'"]), - (LoadType.gRPC, " A , B ", ["A , B"], ["--rule 'A , B'"]), - (LoadType.gRPC, [" A", "B "], ["A", "B"], ["--rule 'A' --rule 'B'"]), - (LoadType.gRPC, None, None, []), - (LoadType.S3, ["A C ", " B"], ["A C", "B"], []), - (LoadType.S3, None, None, []), - ], - ) - def test_ape_list_parsing_formatter(self, load_type, input, value, params): - load_params = LoadParams(load_type) - load_params.preset = Preset() - load_params.preset.rule = input - assert load_params.preset.rule == value - - self._check_preset_params(load_params, params) - @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True) def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): expected_env_vars = { @@ -469,8 +444,6 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", - "--retry '0'", - "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -502,8 +475,6 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", - "--retry '0'", - "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -611,8 +582,6 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", - "--retry '0'", - "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -644,8 +613,6 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", - "--retry '0'", - "--rule ''", "--out ''", "--workers '0'", "--containers '0'",