diff --git a/.devenv.hosting.yaml b/.devenv.hosting.yaml new file mode 100644 index 00000000..f3b8c514 --- /dev/null +++ b/.devenv.hosting.yaml @@ -0,0 +1,109 @@ +hosts: +- address: localhost + hostname: localhost + attributes: + sudo_shell: false + plugin_name: docker + healthcheck_plugin_name: basic + attributes: + skip_readiness_check: True + force_transactions: True + services: + - name: frostfs-storage_01 + attributes: + container_name: s01 + config_path: /etc/frostfs/storage/config.yml + wallet_path: ../frostfs-dev-env/services/storage/wallet01.json + local_wallet_config_path: ./TemporaryDir/empty-password.yml + local_wallet_path: ../frostfs-dev-env/services/storage/wallet01.json + wallet_password: "" + volume_name: storage_storage_s01 + endpoint_data0: s01.frostfs.devenv:8080 + control_endpoint: s01.frostfs.devenv:8081 + un_locode: "RU MOW" + - name: frostfs-storage_02 + attributes: + container_name: s02 + config_path: /etc/frostfs/storage/config.yml + wallet_path: ../frostfs-dev-env/services/storage/wallet02.json + local_wallet_config_path: ./TemporaryDir/empty-password.yml + local_wallet_path: ../frostfs-dev-env/services/storage/wallet02.json + wallet_password: "" + volume_name: storage_storage_s02 + endpoint_data0: s02.frostfs.devenv:8080 + control_endpoint: s02.frostfs.devenv:8081 + un_locode: "RU LED" + - name: frostfs-storage_03 + attributes: + container_name: s03 + config_path: /etc/frostfs/storage/config.yml + wallet_path: ../frostfs-dev-env/services/storage/wallet03.json + local_wallet_config_path: ./TemporaryDir/empty-password.yml + local_wallet_path: ../frostfs-dev-env/services/storage/wallet03.json + wallet_password: "" + volume_name: storage_storage_s03 + endpoint_data0: s03.frostfs.devenv:8080 + control_endpoint: s03.frostfs.devenv:8081 + un_locode: "SE STO" + - name: frostfs-storage_04 + attributes: + container_name: s04 + config_path: /etc/frostfs/storage/config.yml + wallet_path: ../frostfs-dev-env/services/storage/wallet04.json + local_wallet_config_path: ./TemporaryDir/empty-password.yml + local_wallet_path: ../frostfs-dev-env/services/storage/wallet04.json + wallet_password: "" + volume_name: storage_storage_s04 + endpoint_data0: s04.frostfs.devenv:8080 + control_endpoint: s04.frostfs.devenv:8081 + un_locode: "FI HEL" + - name: frostfs-s3_01 + attributes: + container_name: s3_gate + config_path: ../frostfs-dev-env/services/s3_gate/.s3.env + wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json + local_wallet_config_path: ./TemporaryDir/password-s3.yml + local_wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json + wallet_password: "s3" + endpoint_data0: https://s3.frostfs.devenv:8080 + - name: frostfs-http_01 + attributes: + container_name: http_gate + config_path: ../frostfs-dev-env/services/http_gate/.http.env + wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json + local_wallet_config_path: ./TemporaryDir/password-other.yml + local_wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json + wallet_password: "one" + endpoint_data0: http://http.frostfs.devenv + - name: frostfs-ir_01 + attributes: + container_name: ir01 + config_path: ../frostfs-dev-env/services/ir/.ir.env + wallet_path: ../frostfs-dev-env/services/ir/az.json + local_wallet_config_path: ./TemporaryDir/password-other.yml + local_wallet_path: ../frostfs-dev-env/services/ir/az.json + wallet_password: "one" + - name: neo-go_01 + attributes: + container_name: morph_chain + config_path: ../frostfs-dev-env/services/morph_chain/protocol.privnet.yml + wallet_path: 
../frostfs-dev-env/services/morph_chain/node-wallet.json + local_wallet_config_path: ./TemporaryDir/password-other.yml + local_wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json + wallet_password: "one" + endpoint_internal0: http://morph-chain.frostfs.devenv:30333 + - name: main-chain_01 + attributes: + container_name: main_chain + config_path: ../frostfs-dev-env/services/chain/protocol.privnet.yml + wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json + local_wallet_config_path: ./TemporaryDir/password-other.yml + local_wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json + wallet_password: "one" + endpoint_internal0: http://main-chain.frostfs.devenv:30333 + - name: coredns_01 + attributes: + container_name: coredns + clis: + - name: frostfs-cli + exec_path: frostfs-cli diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml new file mode 100644 index 00000000..9aa0d310 --- /dev/null +++ b/.forgejo/workflows/dco.yml @@ -0,0 +1,21 @@ +name: DCO action +on: [pull_request] + +jobs: + dco: + name: DCO + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Setup Go + uses: actions/setup-go@v3 + with: + go-version: '1.21' + + - name: Run commit format checker + uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3 + with: + from: 'origin/${{ github.event.pull_request.base.ref }}' diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 14220628..00000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ -* @aprasolova @vdomnich-yadro @dansingjulia @yadro-vavdeev @abereziny diff --git a/.github/workflows/dco.yml b/.github/workflows/dco.yml deleted file mode 100644 index 40ed8fcb..00000000 --- a/.github/workflows/dco.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: DCO check - -on: - pull_request: - branches: - - master - -jobs: - commits_check_job: - runs-on: ubuntu-latest - name: Commits Check - steps: - - name: Get PR Commits - id: 'get-pr-commits' - uses: tim-actions/get-pr-commits@master - with: - token: ${{ secrets.GITHUB_TOKEN }} - - name: DCO Check - uses: tim-actions/dco@master - with: - commits: ${{ steps.get-pr-commits.outputs.commits }} diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 00000000..519ca425 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,3 @@ +.* @TrueCloudLab/qa-committers +.forgejo/.* @potyarkin +Makefile @potyarkin diff --git a/pyproject.toml b/pyproject.toml index 5a38dba4..d62f04b3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -27,8 +27,8 @@ dependencies = [ "testrail-api>=1.12.0", "pytest==7.1.2", "tenacity==8.0.1", - "boto3==1.16.33", - "boto3-stubs[essential]==1.16.33", + "boto3==1.35.30", + "boto3-stubs[s3,iam,sts]==1.35.30", ] requires-python = ">=3.10" @@ -62,7 +62,7 @@ authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3Credentia wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider" [project.entry-points."frostfs.testlib.bucket_cid_resolver"] -frostfs = "frostfs_testlib.s3.curl_bucket_resolver:CurlBucketContainerResolver" +frostfs = "frostfs_testlib.clients.s3.curl_bucket_resolver:CurlBucketContainerResolver" [tool.isort] profile = "black" @@ -89,4 +89,7 @@ push = false filterwarnings = [ "ignore:Blowfish has been deprecated:cryptography.utils.CryptographyDeprecationWarning", ] -testpaths = ["tests"] \ No newline at end of file +testpaths = ["tests"] + +[project.entry-points.pytest11] +testlib = "frostfs_testlib" \ No newline at end of file diff --git 
a/requirements.txt b/requirements.txt index 32e604f3..56d9b83c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,8 +8,9 @@ docstring_parser==0.15 testrail-api==1.12.0 tenacity==8.0.1 pytest==7.1.2 -boto3==1.16.33 -boto3-stubs[essential]==1.16.33 +boto3==1.35.30 +boto3-stubs[s3,iam,sts]==1.35.30 +pydantic==2.10.6 # Dev dependencies black==22.8.0 @@ -21,4 +22,4 @@ pylint==2.17.4 # Packaging dependencies build==0.8.0 setuptools==65.3.0 -twine==4.0.1 +twine==4.0.1 \ No newline at end of file diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index 159d48b8..4724a8bb 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1 +1,4 @@ __version__ = "2.0.1" + +from .fixtures import configure_testlib, hosting, session_start_time, temp_directory +from .hooks import pytest_add_frostfs_marker, pytest_collection_modifyitems diff --git a/src/frostfs_testlib/analytics/__init__.py b/src/frostfs_testlib/analytics/__init__.py index 6995a08a..b0574180 100644 --- a/src/frostfs_testlib/analytics/__init__.py +++ b/src/frostfs_testlib/analytics/__init__.py @@ -1,5 +1,5 @@ from frostfs_testlib.analytics import test_case from frostfs_testlib.analytics.test_case import TestCasePriority from frostfs_testlib.analytics.test_collector import TestCase, TestCaseCollector -from frostfs_testlib.analytics.test_exporter import TestExporter +from frostfs_testlib.analytics.test_exporter import TСExporter from frostfs_testlib.analytics.testrail_exporter import TestrailExporter diff --git a/src/frostfs_testlib/analytics/test_exporter.py b/src/frostfs_testlib/analytics/test_exporter.py index 5a569c63..dd6a7fb0 100644 --- a/src/frostfs_testlib/analytics/test_exporter.py +++ b/src/frostfs_testlib/analytics/test_exporter.py @@ -3,7 +3,8 @@ from abc import ABC, abstractmethod from frostfs_testlib.analytics.test_collector import TestCase -class TestExporter(ABC): +# TODO: REMOVE ME +class TСExporter(ABC): test_cases_cache = [] test_suites_cache = [] @@ -46,9 +47,7 @@ class TestExporter(ABC): """ @abstractmethod - def update_test_case( - self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section - ) -> None: + def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None: """ Update test case in TMS """ @@ -60,9 +59,7 @@ class TestExporter(ABC): for test_case in test_cases: test_suite = self.get_or_create_test_suite(test_case.suite_name) - test_section = self.get_or_create_suite_section( - test_suite, test_case.suite_section_name - ) + test_section = self.get_or_create_suite_section(test_suite, test_case.suite_section_name) test_case_in_tms = self.search_test_case_id(test_case.id) steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()] diff --git a/src/frostfs_testlib/analytics/testrail_exporter.py b/src/frostfs_testlib/analytics/testrail_exporter.py index 610fee58..36c482c4 100644 --- a/src/frostfs_testlib/analytics/testrail_exporter.py +++ b/src/frostfs_testlib/analytics/testrail_exporter.py @@ -1,10 +1,10 @@ from testrail_api import TestRailAPI from frostfs_testlib.analytics.test_collector import TestCase -from frostfs_testlib.analytics.test_exporter import TestExporter +from frostfs_testlib.analytics.test_exporter import TСExporter -class TestrailExporter(TestExporter): +class TestrailExporter(TСExporter): def __init__( self, tr_url: str, @@ -62,19 +62,13 @@ class TestrailExporter(TestExporter): It's help do not call TMS each time then we search test case """ for test_suite 
in self.test_suites_cache: - self.test_cases_cache.extend( - self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"]) - ) + self.test_cases_cache.extend(self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"])) def search_test_case_id(self, test_case_id: str) -> object: """ Find test cases in TestRail (cache) by ID """ - test_cases = [ - test_case - for test_case in self.test_cases_cache - if test_case["custom_autotest_name"] == test_case_id - ] + test_cases = [test_case for test_case in self.test_cases_cache if test_case["custom_autotest_name"] == test_case_id] if len(test_cases) > 1: raise RuntimeError(f"Too many results found in test rail for id {test_case_id}") @@ -87,9 +81,7 @@ class TestrailExporter(TestExporter): """ Get suite name with exact name from Testrail or create if not exist """ - test_rail_suites = [ - suite for suite in self.test_suites_cache if suite["name"] == test_suite_name - ] + test_rail_suites = [suite for suite in self.test_suites_cache if suite["name"] == test_suite_name] if not test_rail_suites: test_rail_suite = self.api.suites.add_suite( @@ -102,17 +94,13 @@ class TestrailExporter(TestExporter): elif len(test_rail_suites) == 1: return test_rail_suites.pop() else: - raise RuntimeError( - f"Too many results found in test rail for suite name {test_suite_name}" - ) + raise RuntimeError(f"Too many results found in test rail for suite name {test_suite_name}") def get_or_create_suite_section(self, test_rail_suite, section_name) -> object: """ Get suite section with exact name from Testrail or create new one if not exist """ - test_rail_sections = [ - section for section in test_rail_suite["sections"] if section["name"] == section_name - ] + test_rail_sections = [section for section in test_rail_suite["sections"] if section["name"] == section_name] if not test_rail_sections: test_rail_section = self.api.sections.add_section( @@ -128,9 +116,7 @@ class TestrailExporter(TestExporter): elif len(test_rail_sections) == 1: return test_rail_sections.pop() else: - raise RuntimeError( - f"Too many results found in test rail for section name {section_name}" - ) + raise RuntimeError(f"Too many results found in test rail for section name {section_name}") def prepare_request_body(self, test_case: TestCase, test_suite, test_suite_section) -> dict: """ @@ -164,9 +150,7 @@ class TestrailExporter(TestExporter): self.api.cases.add_case(**request_body) - def update_test_case( - self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section - ) -> None: + def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None: """ Update test case in Testrail """ diff --git a/src/frostfs_testlib/cli/cli_command.py b/src/frostfs_testlib/cli/cli_command.py index 3600e774..224e9e3f 100644 --- a/src/frostfs_testlib/cli/cli_command.py +++ b/src/frostfs_testlib/cli/cli_command.py @@ -1,10 +1,11 @@ from typing import Optional from frostfs_testlib.shell import CommandOptions, CommandResult, InteractiveInput, Shell +from frostfs_testlib.utils.datetime_utils import parse_time class CliCommand: - + TIMEOUT_INACCURACY = 10 WALLET_SOURCE_ERROR_MSG = "Provide either wallet or wallet_config to specify wallet location" WALLET_PASSWD_ERROR_MSG = "Provide either wallet_password or wallet_config to specify password" @@ -24,9 +25,7 @@ class CliCommand: def __init__(self, shell: Shell, cli_exec_path: str, **base_params): self.shell = shell self.cli_exec_path = cli_exec_path - self.__base_params = " ".join( - [f"--{param} {value}" for 
param, value in base_params.items() if value] - ) + self.__base_params = " ".join([f"--{param} {value}" for param, value in base_params.items() if value]) def _format_command(self, command: str, **params) -> str: param_str = [] @@ -48,9 +47,7 @@ class CliCommand: val_str = str(value_item).replace("'", "\\'") param_str.append(f"--{param} '{val_str}'") elif isinstance(value, dict): - param_str.append( - f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'' - ) + param_str.append(f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'') else: if "'" in str(value): value_str = str(value).replace('"', '\\"') @@ -63,12 +60,22 @@ class CliCommand: return f"{self.cli_exec_path} {self.__base_params} {command or ''} {param_str}" def _execute(self, command: Optional[str], **params) -> CommandResult: - return self.shell.exec(self._format_command(command, **params)) + if timeout := params.get("timeout"): + timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY - def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult: return self.shell.exec( self._format_command(command, **params), - options=CommandOptions( - interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)] + CommandOptions(timeout=timeout), + ) + + def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult: + if timeout := params.get("timeout"): + timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY + + return self.shell.exec( + self._format_command(command, **params), + CommandOptions( + interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)], + timeout=timeout, ), ) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index 1d753d9f..bdf4a91a 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -69,9 +69,7 @@ class FrostfsAdmMorph(CliCommand): **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) - def set_config( - self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None - ) -> CommandResult: + def set_config(self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult: """Add/update global config value in the FrostFS network. Args: @@ -110,7 +108,7 @@ class FrostfsAdmMorph(CliCommand): **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) - def dump_hashes(self, rpc_endpoint: str) -> CommandResult: + def dump_hashes(self, rpc_endpoint: str, domain: Optional[str] = None) -> CommandResult: """Dump deployed contract hashes. Args: @@ -125,7 +123,7 @@ class FrostfsAdmMorph(CliCommand): ) def force_new_epoch( - self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None + self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None ) -> CommandResult: """Create new FrostFS epoch event in the side chain. 
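A minimal sketch of how the new timeout handling in `CliCommand._execute` (added earlier in this diff) is expected to behave: a `timeout` keyword passed to any CLI method is parsed with `parse_time` and padded by `TIMEOUT_INACCURACY` (10 s) before being handed to the shell, so the local process outlives the CLI's own deadline. The executable path, config file and endpoint below are placeholders, and the exact value returned by `parse_time` is an assumption:

```python
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.utils.datetime_utils import parse_time

cli_timeout = "30s"                             # timeout requested by the test
shell_timeout = parse_time(cli_timeout) + 10    # what _execute now passes to CommandOptions(timeout=...)

# Assumed wiring: FrostfsCli(shell, exec_path, config_file), as constructed elsewhere in the testlib.
cli = FrostfsCli(LocalShell(), "frostfs-cli", config_file="cli-config.yml")
cli.container.list(rpc_endpoint="s01.frostfs.devenv:8080", timeout=cli_timeout)
```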
@@ -344,9 +342,147 @@ class FrostfsAdmMorph(CliCommand): return self._execute( f"morph remove-nodes {' '.join(node_netmap_keys)}", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self", "node_netmap_keys"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, + ) + + def add_rule( + self, + chain_id: str, + target_name: str, + target_type: str, + rule: Optional[list[str]] = None, + path: Optional[str] = None, + chain_id_hex: Optional[bool] = None, + chain_name: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + chain-id: Assign ID to the parsed chain + chain-id-hex: Flag to parse chain ID as hex + path: Path to encoded chain in JSON or binary format + rule: Rule statement + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "morph ape add-rule-chain", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def get_rule( + self, + chain_id: str, + target_name: str, + target_type: str, + chain_id_hex: Optional[bool] = None, + chain_name: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + chain-id string Chain id + chain-id-hex Flag to parse chain ID as hex + target-name string Resource name in APE resource name format + target-type string Resource type(container/namespace) + timeout duration Timeout for an operation (default 15s) + wallet string Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "morph ape get-rule-chain", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list_rules( + self, + target_type: str, + target_name: Optional[str] = None, + rpc_endpoint: Optional[str] = None, + chain_name: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. 
+ """ + return self._execute( + "morph ape list-rule-chains", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def remove_rule( + self, + chain_id: str, + target_name: str, + target_type: str, + all: Optional[bool] = None, + chain_name: Optional[str] = None, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + all: Remove all chains + chain-id: Assign ID to the parsed chain + chain-id-hex: Flag to parse chain ID as hex + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "morph ape rm-rule-chain", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def get_nns_records( + self, + name: str, + type: Optional[str] = None, + rpc_endpoint: Optional[str] = None, + alphabet_wallets: Optional[str] = None, + ) -> CommandResult: + """Returns domain record of the specified type + + Args: + name: Domain name + type: Domain name service record type(A|CNAME|SOA|TXT) + rpc_endpoint: N3 RPC node endpoint + alphabet_wallets: path to alphabet wallets dir + + Returns: + Command's result + """ + return self._execute( + "morph nns get-records", + **{param: value for param, value in locals().items() if param not in ["self"]}, ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py b/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py new file mode 100644 index 00000000..525a9be6 --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py @@ -0,0 +1,70 @@ +from typing import Optional + +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult + + +class FrostfsCliApeManager(CliCommand): + """Operations with APE manager.""" + + def add( + self, + rpc_endpoint: str, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + path: Optional[str] = None, + rule: Optional[str] | Optional[list[str]] = None, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Add rule chain for a target.""" + + return self._execute( + "ape-manager add", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list( + self, + rpc_endpoint: str, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Generate APE override by target and APE chains. Util command. + + Generated APE override can be dumped to a file in JSON format that is passed to + "create" command. + """ + + return self._execute( + "ape-manager list", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def remove( + self, + rpc_endpoint: str, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Generate APE override by target and APE chains. Util command. 
+ + Generated APE override can be dumped to a file in JSON format that is passed to + "create" command. + """ + + return self._execute( + "ape-manager remove", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/bearer.py b/src/frostfs_testlib/cli/frostfs_cli/bearer.py new file mode 100644 index 00000000..e21a6c87 --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_cli/bearer.py @@ -0,0 +1,54 @@ +from typing import Optional + +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult + + +class FrostfsCliBearer(CliCommand): + def create( + self, + rpc_endpoint: str, + out: str, + issued_at: Optional[str] = None, + expire_at: Optional[str] = None, + not_valid_before: Optional[str] = None, + ape: Optional[str] = None, + eacl: Optional[str] = None, + owner: Optional[str] = None, + json: Optional[bool] = False, + impersonate: Optional[bool] = False, + wallet: Optional[str] = None, + address: Optional[str] = None, + ) -> CommandResult: + """Create bearer token. + + All epoch flags can be specified relative to the current epoch with the +n syntax. + In this case --rpc-endpoint flag should be specified and the epoch in bearer token + is set to current epoch + n. + """ + return self._execute( + "bearer create", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def generate_ape_override( + self, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + cid: Optional[str] = None, + output: Optional[str] = None, + path: Optional[str] = None, + rule: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + ) -> CommandResult: + """Generate APE override by target and APE chains. Util command. + + Generated APE override can be dumped to a file in JSON format that is passed to + "create" command. 
+ """ + + return self._execute( + "bearer generate-ape-override", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/cli.py b/src/frostfs_testlib/cli/frostfs_cli/cli.py index c20a9877..d83b7aee 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/cli.py +++ b/src/frostfs_testlib/cli/frostfs_cli/cli.py @@ -2,6 +2,8 @@ from typing import Optional from frostfs_testlib.cli.frostfs_cli.accounting import FrostfsCliAccounting from frostfs_testlib.cli.frostfs_cli.acl import FrostfsCliACL +from frostfs_testlib.cli.frostfs_cli.ape_manager import FrostfsCliApeManager +from frostfs_testlib.cli.frostfs_cli.bearer import FrostfsCliBearer from frostfs_testlib.cli.frostfs_cli.container import FrostfsCliContainer from frostfs_testlib.cli.frostfs_cli.control import FrostfsCliControl from frostfs_testlib.cli.frostfs_cli.netmap import FrostfsCliNetmap @@ -41,3 +43,5 @@ class FrostfsCli: self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file) self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file) self.control = FrostfsCliControl(shell, frostfs_cli_exec_path, config=config_file) + self.bearer = FrostfsCliBearer(shell, frostfs_cli_exec_path, config=config_file) + self.ape_manager = FrostfsCliApeManager(shell, frostfs_cli_exec_path, config=config_file) diff --git a/src/frostfs_testlib/cli/frostfs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py index b5592e88..8bcbe9e5 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/container.py +++ b/src/frostfs_testlib/cli/frostfs_cli/container.py @@ -9,11 +9,15 @@ class FrostfsCliContainer(CliCommand): self, rpc_endpoint: str, wallet: Optional[str] = None, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, address: Optional[str] = None, attributes: Optional[dict] = None, basic_acl: Optional[str] = None, await_mode: bool = False, disable_timestamp: bool = False, + force: bool = False, + trace: bool = False, name: Optional[str] = None, nonce: Optional[str] = None, policy: Optional[str] = None, @@ -35,6 +39,8 @@ class FrostfsCliContainer(CliCommand): basic_acl: Hex encoded basic ACL value or keywords like 'public-read-write', 'private', 'eacl-public-read' (default "private"). disable_timestamp: Disable timestamp container attribute. + force: Skip placement validity check. + trace: Generate trace ID and print it. name: Container name attribute. nonce: UUIDv4 nonce value for container. policy: QL-encoded or JSON-encoded placement policy or path to file with it. @@ -45,6 +51,8 @@ class FrostfsCliContainer(CliCommand): wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). + nns_zone: Container nns zone attribute. + nns_name: Container nns name attribute. Returns: Command's result. @@ -65,6 +73,7 @@ class FrostfsCliContainer(CliCommand): ttl: Optional[int] = None, xhdr: Optional[dict] = None, force: bool = False, + trace: bool = False, ) -> CommandResult: """ Delete an existing container. @@ -74,6 +83,7 @@ class FrostfsCliContainer(CliCommand): address: Address of wallet account. await_mode: Block execution until container is removed. cid: Container ID. + trace: Generate trace ID and print it. force: Do not check whether container contains locks and remove immediately. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). session: Path to a JSON-encoded container session token. 
@@ -96,9 +106,11 @@ class FrostfsCliContainer(CliCommand): cid: str, wallet: Optional[str] = None, address: Optional[str] = None, + generate_key: Optional[bool] = None, await_mode: bool = False, to: Optional[str] = None, json_mode: bool = False, + trace: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, @@ -111,12 +123,14 @@ class FrostfsCliContainer(CliCommand): await_mode: Block execution until container is removed. cid: Container ID. json_mode: Print or dump container in JSON format. + trace: Generate trace ID and print it. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). to: Path to dump encoded container. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). + generate_key: Generate a new private key. Returns: Command's result. @@ -132,6 +146,7 @@ class FrostfsCliContainer(CliCommand): cid: str, wallet: Optional[str] = None, address: Optional[str] = None, + generate_key: Optional[bool] = None, await_mode: bool = False, to: Optional[str] = None, session: Optional[str] = None, @@ -148,11 +163,14 @@ class FrostfsCliContainer(CliCommand): cid: Container ID. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). to: Path to dump encoded container. + json_mode: Print or dump container in JSON format. + trace: Generate trace ID and print it. session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). + generate_key: Generate a new private key. Returns: Command's result. @@ -166,8 +184,10 @@ class FrostfsCliContainer(CliCommand): def list( self, rpc_endpoint: str, + name: Optional[str] = None, wallet: Optional[str] = None, address: Optional[str] = None, + generate_key: Optional[bool] = None, owner: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -179,12 +199,15 @@ class FrostfsCliContainer(CliCommand): Args: address: Address of wallet account. + name: List containers by the attribute name. owner: Owner of containers (omit to use owner from private key). rpc_endpoint: Remote node address (as 'multiaddr' or ':'). ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + trace: Generate trace ID and print it. timeout: Timeout for the operation (default 15s). + generate_key: Generate a new private key. Returns: Command's result. @@ -198,8 +221,11 @@ class FrostfsCliContainer(CliCommand): self, rpc_endpoint: str, cid: str, + bearer: Optional[str] = None, wallet: Optional[str] = None, address: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, @@ -210,11 +236,14 @@ class FrostfsCliContainer(CliCommand): Args: address: Address of wallet account. cid: Container ID. + bearer: File with signed JSON or binary encoded bearer token. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + trace: Generate trace ID and print it. timeout: Timeout for the operation (default 15s). 
+ generate_key: Generate a new private key. Returns: Command's result. @@ -224,6 +253,7 @@ class FrostfsCliContainer(CliCommand): **{param: value for param, value in locals().items() if param not in ["self"]}, ) + # TODO Deprecated method with 0.42 def set_eacl( self, rpc_endpoint: str, @@ -269,6 +299,7 @@ class FrostfsCliContainer(CliCommand): address: Optional[str] = None, ttl: Optional[int] = None, from_file: Optional[str] = None, + trace: bool = False, short: Optional[bool] = True, xhdr: Optional[dict] = None, generate_key: Optional[bool] = None, @@ -286,8 +317,9 @@ class FrostfsCliContainer(CliCommand): from_file: string File path with encoded container timeout: duration Timeout for the operation (default 15 s) short: shorten the output of node information. + trace: Generate trace ID and print it. xhdr: Dict with request X-Headers. - generate_key: Generate a new private key + generate_key: Generate a new private key. Returns: diff --git a/src/frostfs_testlib/cli/frostfs_cli/control.py b/src/frostfs_testlib/cli/frostfs_cli/control.py index 2cddfdfa..957bca94 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/control.py +++ b/src/frostfs_testlib/cli/frostfs_cli/control.py @@ -69,7 +69,7 @@ class FrostfsCliControl(CliCommand): wallet: Path to the wallet or binary key address: Address of wallet account endpoint: Remote node control address (as 'multiaddr' or ':') - objects: List of object addresses to be removed in string format + objects: List of object addresses to be removed in string format timeout: Timeout for an operation (default 15s) Returns: @@ -78,4 +78,155 @@ class FrostfsCliControl(CliCommand): return self._execute( "control drop-objects", **{param: value for param, value in locals().items() if param not in ["self"]}, - ) \ No newline at end of file + ) + + def add_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + rule: Optional[list[str]] = None, + path: Optional[str] = None, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + address: Address of wallet account + chain-id: Assign ID to the parsed chain + chain-id-hex: Flag to parse chain ID as hex + endpoint: Remote node control address (as 'multiaddr' or ':') + path: Path to encoded chain in JSON or binary format + rule: Rule statement + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. 
+ """ + return self._execute( + "control add-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def get_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + address string Address of wallet account + chain-id string Chain id + chain-id-hex Flag to parse chain ID as hex + endpoint string Remote node control address (as 'multiaddr' or ':') + target-name string Resource name in APE resource name format + target-type string Resource type(container/namespace) + timeout duration Timeout for an operation (default 15s) + wallet string Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "control get-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list_rules( + self, + endpoint: str, + target_name: str, + target_type: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "control list-rules", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list_targets( + self, + endpoint: str, + chain_name: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + address: Address of wallet account + chain-name: Chain name(ingress|s3) + endpoint: Remote node control address (as 'multiaddr' or ':') + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "control list-targets", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def remove_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + all: Optional[bool] = None, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + address: Address of wallet account + all: Remove all chains + chain-id: Assign ID to the parsed chain + chain-id-hex: Flag to parse chain ID as hex + endpoint: Remote node control address (as 'multiaddr' or ':') + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. 
+ """ + return self._execute( + "control remove-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/netmap.py b/src/frostfs_testlib/cli/frostfs_cli/netmap.py index d2199404..cd197d3b 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/netmap.py +++ b/src/frostfs_testlib/cli/frostfs_cli/netmap.py @@ -12,6 +12,7 @@ class FrostfsCliNetmap(CliCommand): address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -42,6 +43,7 @@ class FrostfsCliNetmap(CliCommand): address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -73,6 +75,7 @@ class FrostfsCliNetmap(CliCommand): generate_key: bool = False, json: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -104,6 +107,7 @@ class FrostfsCliNetmap(CliCommand): address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 5d5bd91e..e5365440 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -13,6 +13,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -25,6 +26,7 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. + generate_key: Generate new private key. oid: Object ID. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). session: Filepath to a JSON- or binary-encoded token of the object DELETE session. @@ -49,6 +51,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, file: Optional[str] = None, header: Optional[str] = None, no_progress: bool = False, @@ -66,6 +69,7 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. file: File to write object payload to. Default: stdout. + generate_key: Generate new private key. header: File to write header to. Default: stdout. no_progress: Do not show progress bar. oid: Object ID. @@ -93,6 +97,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, range: Optional[str] = None, salt: Optional[str] = None, ttl: Optional[int] = None, @@ -108,6 +113,7 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. + generate_key: Generate new private key. oid: Object ID. range: Range to take hash from in the form offset1:length1,... rpc_endpoint: Remote node address (as 'multiaddr' or ':'). 
@@ -135,6 +141,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, file: Optional[str] = None, json_mode: bool = False, main_only: bool = False, @@ -153,6 +160,7 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. file: File to write object payload to. Default: stdout. + generate_key: Generate new private key. json_mode: Marshal output in JSON. main_only: Return only main fields. oid: Object ID. @@ -183,6 +191,7 @@ class FrostfsCliObject(CliCommand): expire_at: Optional[int] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -195,6 +204,7 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. + generate_key: Generate new private key. oid: Object ID. lifetime: Lock lifetime. expire_at: Lock expiration epoch. @@ -222,6 +232,7 @@ class FrostfsCliObject(CliCommand): address: Optional[str] = None, attributes: Optional[dict] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, copies_number: Optional[int] = None, disable_filename: bool = False, disable_timestamp: bool = False, @@ -246,6 +257,7 @@ class FrostfsCliObject(CliCommand): disable_timestamp: Do not set well-known timestamp attribute. expire_at: Last epoch in the life of the object. file: File with object payload. + generate_key: Generate new private key. no_progress: Do not show progress bar. notify: Object notification in the form of *epoch*:*topic*; '-' topic means using default. @@ -264,6 +276,54 @@ class FrostfsCliObject(CliCommand): **{param: value for param, value in locals().items() if param not in ["self"]}, ) + def patch( + self, + rpc_endpoint: str, + cid: str, + oid: str, + range: list[str] = None, + payload: list[str] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + trace: bool = False, + ttl: Optional[int] = None, + wallet: Optional[str] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + PATCH an object. + + Args: + rpc_endpoint: Remote node address (as 'multiaddr' or ':') + cid: Container ID + oid: Object ID + range: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2] + payload: An array of file paths to be applied in each range + new_attrs: Attributes to be changed in the format Key1=Value1,Key2=Value2 + replace_attrs: Replace all attributes completely with new ones specified in new_attrs + address: Address of wallet account + bearer: File with signed JSON or binary encoded bearer token + generate_key: Generate new private key + session: Filepath to a JSON- or binary-encoded token of the object RANGE session + timeout: Timeout for the operation + trace: Generate trace ID and print it + ttl: TTL value in request meta header (default 2) + wallet: WIF (NEP-2) string or path to the wallet or binary key + xhdr: Dict with request X-Headers + + Returns: + Command's result. 
+ """ + return self._execute( + "object patch", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + def range( self, rpc_endpoint: str, @@ -273,6 +333,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, file: Optional[str] = None, json_mode: bool = False, raw: bool = False, @@ -289,6 +350,7 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. file: File to write object payload to. Default: stdout. + generate_key: Generate new private key. json_mode: Marshal output in JSON. oid: Object ID. range: Range to take data from in the form offset:length. @@ -315,6 +377,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, filters: Optional[list] = None, oid: Optional[str] = None, phy: bool = False, @@ -332,6 +395,7 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. filters: Repeated filter expressions or files with protobuf JSON. + generate_key: Generate new private key. oid: Object ID. phy: Search physically stored objects. root: Search for user objects. @@ -354,14 +418,15 @@ class FrostfsCliObject(CliCommand): self, rpc_endpoint: str, cid: str, + oid: Optional[str] = None, wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, generate_key: Optional[bool] = None, - oid: Optional[str] = None, trace: bool = False, root: bool = False, verify_presence_all: bool = False, + json: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py index 4399b139..68a2f544 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -40,7 +40,7 @@ class FrostfsCliShards(CliCommand): self, endpoint: str, mode: str, - id: Optional[list[str]], + id: Optional[list[str]] = None, wallet: Optional[str] = None, wallet_password: Optional[str] = None, address: Optional[str] = None, @@ -143,3 +143,119 @@ class FrostfsCliShards(CliCommand): **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, ) + def evacuation_start( + self, + endpoint: str, + id: Optional[str] = None, + scope: Optional[str] = None, + all: bool = False, + no_errors: bool = True, + await_mode: bool = False, + address: Optional[str] = None, + timeout: Optional[str] = None, + no_progress: bool = False, + ) -> CommandResult: + """ + Objects evacuation from shard to other shards. + + Args: + address: Address of wallet account + all: Process all shards + await: Block execution until evacuation is completed + endpoint: Remote node control address (as 'multiaddr' or ':') + id: List of shard IDs in base58 encoding + no_errors: Skip invalid/unreadable objects (default true) + no_progress: Print progress if await provided + scope: Evacuation scope; possible values: trees, objects, all (default "all") + timeout: Timeout for an operation (default 15s) + + Returns: + Command's result. 
+ """ + return self._execute( + "control shards evacuation start", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def evacuation_reset( + self, + endpoint: str, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """ + Reset evacuate objects from shard to other shards status. + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + timeout: Timeout for an operation (default 15s) + Returns: + Command's result. + """ + return self._execute( + "control shards evacuation reset", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def evacuation_stop( + self, + endpoint: str, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """ + Stop running evacuate process from shard to other shards. + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + timeout: Timeout for an operation (default 15s) + + Returns: + Command's result. + """ + return self._execute( + "control shards evacuation stop", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def evacuation_status( + self, + endpoint: str, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """ + Get evacuate objects from shard to other shards status. + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + timeout: Timeout for an operation (default 15s) + + Returns: + Command's result. + """ + return self._execute( + "control shards evacuation status", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def detach(self, endpoint: str, address: Optional[str] = None, id: Optional[str] = None, timeout: Optional[str] = None): + """ + Detach and close the shards + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + id: List of shard IDs in base58 encoding + timeout: Timeout for an operation (default 15s) + + Returns: + Command's result. + """ + return self._execute( + "control shards detach", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/tree.py b/src/frostfs_testlib/cli/frostfs_cli/tree.py index af330fed..c75b5260 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/tree.py +++ b/src/frostfs_testlib/cli/frostfs_cli/tree.py @@ -27,3 +27,27 @@ class FrostfsCliTree(CliCommand): "tree healthcheck", **{param: value for param, value in locals().items() if param not in ["self"]}, ) + + def list( + self, + cid: str, + rpc_endpoint: Optional[str] = None, + wallet: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Get Tree List + + Args: + cid: Container ID. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + timeout: duration Timeout for the operation (default 15 s) + + Returns: + Command's result. 
+ + """ + return self._execute( + "tree list", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/util.py b/src/frostfs_testlib/cli/frostfs_cli/util.py index 79141693..37347a5f 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/util.py +++ b/src/frostfs_testlib/cli/frostfs_cli/util.py @@ -54,3 +54,11 @@ class FrostfsCliUtil(CliCommand): "util sign session-token", **{param: value for param, value in locals().items() if param not in ["self"]}, ) + + def convert_eacl(self, from_file: str, to_file: str, json: Optional[bool] = False, ape: Optional[bool] = False): + """Convert representation of extended ACL table.""" + + return self._execute( + "util convert eacl", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py index 94d12b8e..4b4a5015 100644 --- a/src/frostfs_testlib/cli/netmap_parser.py +++ b/src/frostfs_testlib/cli/netmap_parser.py @@ -1,7 +1,7 @@ import re from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo, NodeStatus +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeInfo, NodeNetInfo, NodeNetmapInfo, NodeStatus class NetmapParser: @@ -15,11 +15,11 @@ class NetmapParser: "epoch_duration": r"Epoch duration: (?P\d+)", "inner_ring_candidate_fee": r"Inner Ring candidate fee: (?P\d+)", "maximum_object_size": r"Maximum object size: (?P\d+)", + "maximum_count_of_data_shards": r"Maximum count of data shards: (?P\d+)", + "maximum_count_of_parity_shards": r"Maximum count of parity shards: (?P\d+)", "withdrawal_fee": r"Withdrawal fee: (?P\d+)", "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?Ptrue|false)", "maintenance_mode_allowed": r"Maintenance mode allowed: (?Ptrue|false)", - "eigen_trust_alpha": r"EigenTrustAlpha: (?P\d+\w+$)", - "eigen_trust_iterations": r"EigenTrustIterations: (?P\d+)", } parse_result = {} @@ -62,7 +62,7 @@ class NetmapParser: for node in netmap_nodes: for key, regex in regexes.items(): search_result = re.search(regex, node, flags=re.MULTILINE) - if search_result == None: + if search_result is None: result_netmap[key] = None continue if key == "node_data_ips": @@ -81,9 +81,22 @@ class NetmapParser: return dataclasses_netmap @staticmethod - def snapshot_one_node(output: str, cluster_node: ClusterNode) -> NodeNetmapInfo | None: + def snapshot_one_node(output: str, rpc_endpoint: str) -> NodeNetmapInfo | None: snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output) - snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.host_ip] - if not snapshot_node: - return None - return snapshot_node[0] + for snapshot in snapshot_nodes: + for endpoint in snapshot.external_address: + if rpc_endpoint.split(":")[0] in endpoint: + return snapshot + + @staticmethod + def node_info(output: dict) -> NodeInfo: + data_dict = {"attributes": {}} + + for key, value in output.items(): + if key != "attributes": + data_dict[key] = value + + for attribute in output["attributes"]: + data_dict["attributes"][attribute["key"]] = attribute["value"] + + return NodeInfo(**data_dict) diff --git a/src/frostfs_testlib/clients/__init__.py b/src/frostfs_testlib/clients/__init__.py new file mode 100644 index 00000000..e46766b6 --- /dev/null +++ b/src/frostfs_testlib/clients/__init__.py @@ -0,0 +1,5 @@ +from 
frostfs_testlib.clients.http.http_client import HttpClient +from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient +from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper +from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper +from frostfs_testlib.clients.s3.s3_http_client import S3HttpClient diff --git a/src/frostfs_testlib/clients/http/__init__.py b/src/frostfs_testlib/clients/http/__init__.py new file mode 100644 index 00000000..ab6e2b07 --- /dev/null +++ b/src/frostfs_testlib/clients/http/__init__.py @@ -0,0 +1 @@ +from frostfs_testlib.clients.http.http_client import HttpClient diff --git a/src/frostfs_testlib/clients/http/http_client.py b/src/frostfs_testlib/clients/http/http_client.py new file mode 100644 index 00000000..16d77075 --- /dev/null +++ b/src/frostfs_testlib/clients/http/http_client.py @@ -0,0 +1,152 @@ +import io +import json +import logging +import logging.config +from typing import Mapping, Sequence + +import httpx + +from frostfs_testlib import reporter + +timeout = httpx.Timeout(60, read=150) +LOGGING_CONFIG = { + "disable_existing_loggers": False, + "version": 1, + "handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}}, + "formatters": { + "http": { + "format": "%(asctime)s [%(levelname)s] %(name)s - %(message)s", + "datefmt": "%Y-%m-%d %H:%M:%S", + } + }, + "loggers": { + "httpx": { + "handlers": ["default"], + "level": "ERROR", + }, + "httpcore": { + "handlers": ["default"], + "level": "ERROR", + }, + }, +} + +logging.config.dictConfig(LOGGING_CONFIG) +logger = logging.getLogger("NeoLogger") + + +class HttpClient: + @reporter.step("Send {method} request to {url}") + def send(self, method: str, url: str, expected_status_code: int = None, **kwargs: dict) -> httpx.Response: + transport = httpx.HTTPTransport(verify=False, retries=5) + client = httpx.Client(timeout=timeout, transport=transport) + response = client.request(method, url, **kwargs) + + self._attach_response(response, **kwargs) + # logger.info(f"Response: {response.status_code} => {response.text}") + + if expected_status_code: + assert ( + response.status_code == expected_status_code + ), f"Got {response.status_code} response code while {expected_status_code} expected" + + return response + + @classmethod + def _parse_body(cls, readable: httpx.Request | httpx.Response) -> str | None: + try: + content = readable.read() + except Exception as e: + logger.warning(f"Unable to read file: {str(e)}") + return None + + if not content: + return None + + request_body = None + + try: + request_body = json.loads(content) + except (json.JSONDecodeError, UnicodeDecodeError) as e: + logger.warning(f"Unable to convert body to json: {str(e)}") + + if request_body is not None: + return json.dumps(request_body, default=str, indent=4) + + try: + request_body = content.decode() + except UnicodeDecodeError as e: + logger.warning(f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}") + + request_body = content if request_body is None else request_body + request_body = "" if len(request_body) > 1000 else request_body + + return request_body + + @classmethod + def _parse_files(cls, files: Mapping | Sequence | None) -> dict: + filepaths = {} + + if not files: + return filepaths + + if isinstance(files, Sequence): + items = files + elif isinstance(files, Mapping): + items = files.items() + else: + raise TypeError(f"'files' must be either Sequence or Mapping, got: {type(files).__name__}") + + for name, file in items: + if 
isinstance(file, io.IOBase): + filepaths[name] = file.name + elif isinstance(file, Sequence): + filepaths[name] = file[1].name + + return filepaths + + @classmethod + def _attach_response(cls, response: httpx.Response, **kwargs): + request = response.request + request_headers = json.dumps(dict(request.headers), default=str, indent=4) + request_body = cls._parse_body(request) + + files = kwargs.get("files") + request_files = cls._parse_files(files) + + response_headers = json.dumps(dict(response.headers), default=str, indent=4) + response_body = cls._parse_body(response) + + report = ( + f"Method: {request.method}\n\n" + + f"URL: {request.url}\n\n" + + f"Request Headers: {request_headers}\n\n" + + (f"Request Body: {request_body}\n\n" if request_body else "") + + (f"Request Files: {request_files}\n\n" if request_files else "") + + f"Response Status Code: {response.status_code}\n\n" + + f"Response Headers: {response_headers}\n\n" + + (f"Response Body: {response_body}\n\n" if response_body else "") + ) + curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body, request_files) + + reporter.attach(report, "Requests Info") + reporter.attach(curl_request, "CURL") + cls._write_log(curl_request, response_body, response.status_code) + + @classmethod + def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str: + excluded_headers = {"Accept-Encoding", "Connection", "User-Agent", "Content-Length"} + headers = " ".join(f"-H '{header.title()}: {value}'" for header, value in headers.items() if header.title() not in excluded_headers) + + data = f" -d '{data}'" if data else "" + for name, path in files.items(): + data += f' -F "{name}=@{path}"' + + # Option -k means no verify SSL + return f"curl {url} -X {method} {headers}{data} -k" + + @classmethod + def _write_log(cls, curl: str, res_body: str, res_code: int) -> None: + if res_body: + curl += f"\nResponse: {res_code}\n{res_body}" + logger.info(f"{curl}") diff --git a/src/frostfs_testlib/clients/s3/__init__.py b/src/frostfs_testlib/clients/s3/__init__.py new file mode 100644 index 00000000..5481f488 --- /dev/null +++ b/src/frostfs_testlib/clients/s3/__init__.py @@ -0,0 +1,3 @@ +from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient +from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper +from frostfs_testlib.clients.s3.interfaces import ACL, BucketContainerResolver, S3ClientWrapper, VersioningStatus diff --git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py new file mode 100644 index 00000000..c1dd6b66 --- /dev/null +++ b/src/frostfs_testlib/clients/s3/aws_cli_client.py @@ -0,0 +1,1548 @@ +import json +import logging +import os +from datetime import datetime +from time import sleep +from typing import Literal, Optional, Union + +from frostfs_testlib import reporter +from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict +from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME +from frostfs_testlib.shell import CommandOptions +from frostfs_testlib.shell.local_shell import LocalShell +from frostfs_testlib.utils import string_utils + +# TODO: Refactor this code to use shell instead of _cmd_run +from frostfs_testlib.utils.cli_utils import _configure_aws_cli +from frostfs_testlib.utils.file_utils import TestFile + +logger = logging.getLogger("NeoLogger") +command_options = 
CommandOptions(timeout=480) + + +class AwsCliClient(S3ClientWrapper): + __repr_name__: str = "AWS CLI" + + # Flags that we use for all S3 commands: disable SSL verification (as we use self-signed + # certificate in devenv) and disable automatic pagination in CLI output + common_flags = "--no-verify-ssl --no-paginate" + s3gate_endpoint: str + + @reporter.step("Configure S3 client (aws cli)") + def __init__( + self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" + ) -> None: + self.s3gate_endpoint = s3gate_endpoint + self.iam_endpoint = None + + self.access_key_id: str = access_key_id + self.secret_access_key: str = secret_access_key + self.profile = profile + self.region = region + + self.local_shell = LocalShell() + try: + _configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key, region) + self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}") + self.local_shell.exec( + f"aws configure set retry_mode {RETRY_MODE} --profile {profile}", + ) + except Exception as err: + raise RuntimeError("Error while configuring AwsCliClient") from err + + @reporter.step("Set S3 endpoint to {s3gate_endpoint}") + def set_endpoint(self, s3gate_endpoint: str): + self.s3gate_endpoint = s3gate_endpoint + + @reporter.step("Set IAM endpoint to {iam_endpoint}") + def set_iam_endpoint(self, iam_endpoint: str): + self.iam_endpoint = iam_endpoint + + @reporter.step("Create bucket S3") + def create_bucket( + self, + bucket: Optional[str] = None, + object_lock_enabled_for_bucket: Optional[bool] = None, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + grant_full_control: Optional[str] = None, + location_constraint: Optional[str] = None, + ) -> str: + if bucket is None: + bucket = string_utils.unique_name("bucket-") + + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + if object_lock_enabled_for_bucket is None: + object_lock = "" + elif object_lock_enabled_for_bucket: + object_lock = " --object-lock-enabled-for-bucket" + else: + object_lock = " --no-object-lock-enabled-for-bucket" + cmd = ( + f"aws {self.common_flags} s3api create-bucket --bucket {bucket} " + f"{object_lock} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + if acl: + cmd += f" --acl {acl}" + if grant_full_control: + cmd += f" --grant-full-control {grant_full_control}" + if grant_write: + cmd += f" --grant-write {grant_write}" + if grant_read: + cmd += f" --grant-read {grant_read}" + if location_constraint: + cmd += f" --create-bucket-configuration LocationConstraint={location_constraint}" + self.local_shell.exec(cmd) + + return bucket + + @reporter.step("List buckets S3") + def list_buckets(self) -> list[str]: + cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint} --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + buckets_json = self._to_json(output) + return [bucket["Name"] for bucket in buckets_json["Buckets"]] + + @reporter.step("Delete bucket S3") + def delete_bucket(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + self.local_shell.exec(cmd, command_options) + + @reporter.step("Head bucket S3") + def head_bucket(self, bucket: str) -> None: + if bucket.startswith("-") or " " in 
bucket: + bucket = f'"{bucket}"' + + cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + self.local_shell.exec(cmd) + + @reporter.step("Put bucket versioning status") + def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} " + f"--versioning-configuration Status={status.value} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Get bucket versioning status") + def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("Status") + + @reporter.step("Put bucket tagging") + def put_bucket_tagging(self, bucket: str, tags: list) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]} + cmd = ( + f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} " + f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Get bucket tagging") + def get_bucket_tagging(self, bucket: str) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("TagSet") + + @reporter.step("Get bucket acl") + def get_bucket_acl(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + return self._to_json(output) + + @reporter.step("Get bucket location") + def get_bucket_location(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("LocationConstraint") + + @reporter.step("List objects S3") + def list_objects( + self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None + ) -> Union[dict, list[str]]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} " + if page_size: + cmd = cmd.replace("--no-paginate", "") + cmd += f" --page-size {page_size} " + if prefix: + cmd += f" --prefix {prefix}" + if self.profile: + cmd += f" --profile {self.profile} " + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + obj_list = [obj["Key"] for 
obj in response.get("Contents", [])] + logger.info(f"Found s3 objects: {obj_list}") + + return response if full_output else obj_list + + @reporter.step("List objects S3 v2") + def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + obj_list = [obj["Key"] for obj in response.get("Contents", [])] + logger.info(f"Found s3 objects: {obj_list}") + + return response if full_output else obj_list + + @reporter.step("List objects versions S3") + def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response if full_output else response.get("Versions", []) + + @reporter.step("List objects delete markers S3") + def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response if full_output else response.get("DeleteMarkers", []) + + @reporter.step("Copy object S3") + def copy_object( + self, + source_bucket: str, + source_key: str, + bucket: Optional[str] = None, + key: Optional[str] = None, + acl: Optional[str] = None, + metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None, + metadata: Optional[dict] = None, + tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None, + tagging: Optional[str] = None, + ) -> str: + if bucket is None: + bucket = source_bucket + + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + if key is None: + key = string_utils.unique_name("copy-object-") + + copy_source = f"{source_bucket}/{source_key}" + + cmd = ( + f"aws {self.common_flags} s3api copy-object --copy-source {copy_source} " + f"--bucket {bucket} --key {key} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + if acl: + cmd += f" --acl {acl}" + if metadata_directive: + cmd += f" --metadata-directive {metadata_directive}" + if metadata: + cmd += " --metadata " + for meta_key, value in metadata.items(): + cmd += f" {meta_key}={value}" + if tagging_directive: + cmd += f" --tagging-directive {tagging_directive}" + if tagging: + cmd += f" --tagging {tagging}" + self.local_shell.exec(cmd, command_options) + return key + + @reporter.step("Put object S3") + def put_object( + self, + bucket: str, + filepath: str, + key: Optional[str] = None, + metadata: Optional[dict] = None, + tagging: Optional[str] = None, + acl: Optional[str] = None, + object_lock_mode: Optional[str] = None, + object_lock_retain_until_date: Optional[datetime] = None, + object_lock_legal_hold_status: Optional[str] = None, + grant_full_control: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + if key is None: + 
key = os.path.basename(filepath) + + cmd = ( + f"aws {self.common_flags} s3api put-object --bucket {bucket} --key {key} " + f"--body {filepath} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + if metadata: + cmd += " --metadata" + for key, value in metadata.items(): + cmd += f" {key}={value}" + if tagging: + cmd += f" --tagging '{tagging}'" + if acl: + cmd += f" --acl {acl}" + if object_lock_mode: + cmd += f" --object-lock-mode {object_lock_mode}" + if object_lock_retain_until_date: + cmd += f' --object-lock-retain-until-date "{object_lock_retain_until_date}"' + if object_lock_legal_hold_status: + cmd += f" --object-lock-legal-hold-status {object_lock_legal_hold_status}" + if grant_full_control: + cmd += f" --grant-full-control '{grant_full_control}'" + if grant_read: + cmd += f" --grant-read {grant_read}" + output = self.local_shell.exec(cmd, command_options).stdout + response = self._to_json(output) + return response.get("VersionId") + + @reporter.step("Head object S3") + def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} " + f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + + @reporter.step("Get object S3") + def get_object( + self, + bucket: str, + key: str, + version_id: Optional[str] = None, + object_range: Optional[tuple[int, int]] = None, + full_output: bool = False, + ) -> dict | TestFile: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-"))) + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} " + f"{version} {test_file} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + if object_range: + cmd += f" --range bytes={object_range[0]}-{object_range[1]}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response if full_output else test_file + + @reporter.step("Get object ACL") + def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} " + f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("Grants") + + @reporter.step("Put object ACL") + def put_object_acl( + self, + bucket: str, + key: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api put-object-acl --bucket {bucket} --key {key} " + f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + if acl: + cmd += f" --acl {acl}" + if grant_write: + cmd += f" --grant-write {grant_write}" + if grant_read: + cmd += f" --grant-read {grant_read}" + output = self.local_shell.exec(cmd).stdout + response = 
self._to_json(output) + return response.get("Grants") + + @reporter.step("Put bucket ACL") + def put_bucket_acl( + self, + bucket: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " + f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + if acl: + cmd += f" --acl {acl}" + if grant_write: + cmd += f" --grant-write {grant_write}" + if grant_read: + cmd += f" --grant-read {grant_read}" + self.local_shell.exec(cmd) + + @reporter.step("Delete objects S3") + def delete_objects(self, bucket: str, keys: list[str]) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") + delete_structure = json.dumps(_make_objs_dict(keys)) + with open(file_path, "w") as out_file: + out_file.write(delete_structure) + logger.info(f"Input file for delete-objects: {delete_structure}") + + cmd = ( + f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " + f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd, command_options).stdout + response = self._to_json(output) + return response + + @reporter.step("Delete object S3") + def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api delete-object --bucket {bucket} " + f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd, command_options).stdout + return self._to_json(output) + + @reporter.step("Delete object versions S3") + def delete_object_versions(self, bucket: str, object_versions: list) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + # Build deletion list in S3 format + delete_list = { + "Objects": [ + { + "Key": object_version["Key"], + "VersionId": object_version["VersionId"], + } + for object_version in object_versions + ] + } + + file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") + delete_structure = json.dumps(delete_list) + with open(file_path, "w") as out_file: + out_file.write(delete_structure) + logger.info(f"Input file for delete-objects: {delete_structure}") + + cmd = ( + f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " + f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd, command_options).stdout + return self._to_json(output) + + @reporter.step("Delete object versions S3 without delete markers") + def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + # Delete objects without creating delete markers + for object_version in object_versions: + self.delete_object(bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"]) + + @reporter.step("Get object attributes") + def get_object_attributes( + self, + bucket: str, + key: str, + attributes: list[str], + version_id: str = "", + max_parts: int = 0, + part_number: int = 0, + full_output: bool = True, + ) -> dict: + if bucket.startswith("-") or " 
" in bucket: + bucket = f'"{bucket}"' + + attrs = ",".join(attributes) + version = f" --version-id {version_id}" if version_id else "" + parts = f"--max-parts {max_parts}" if max_parts else "" + part_number_str = f"--part-number-marker {part_number}" if part_number else "" + cmd = ( + f"aws {self.common_flags} s3api get-object-attributes --bucket {bucket} " + f"--key {key} {version} {parts} {part_number_str} --object-attributes {attrs} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + for attr in attributes: + assert attr in response, f"Expected attribute {attr} in {response}" + + if full_output: + return response + else: + return response.get(attributes[0]) + + @reporter.step("Get bucket policy") + def get_bucket_policy(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("Policy") + + @reporter.step("Delete bucket policy") + def delete_bucket_policy(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api delete-bucket-policy --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + + @reporter.step("Put bucket policy") + def put_bucket_policy(self, bucket: str, policy: dict) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + # Leaving it as is was in test repo. 
Double dumps to escape resulting string + # Example: + # policy = {"a": 1} + # json.dumps(policy) => {"a": 1} + # json.dumps(json.dumps(policy)) => "{\"a\": 1}" + # TODO: update this + dumped_policy = json.dumps(json.dumps(policy)) + cmd = ( + f"aws {self.common_flags} s3api put-bucket-policy --bucket {bucket} " + f"--policy {dumped_policy} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Get bucket cors") + def get_bucket_cors(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("CORSRules") + + @reporter.step("Put bucket cors") + def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " + f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Delete bucket cors") + def delete_bucket_cors(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Delete bucket tagging") + def delete_bucket_tagging(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Put object retention") + def put_object_retention( + self, + bucket: str, + key: str, + retention: dict, + version_id: Optional[str] = None, + bypass_governance_retention: Optional[bool] = None, + ) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api put-object-retention --bucket {bucket} --key {key} " + f"{version} --retention '{json.dumps(retention, indent=4, sort_keys=True, default=str)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + if bypass_governance_retention is not None: + cmd += " --bypass-governance-retention" + self.local_shell.exec(cmd) + + @reporter.step("Put object legal hold") + def put_object_legal_hold( + self, + bucket: str, + key: str, + legal_hold_status: Literal["ON", "OFF"], + version_id: Optional[str] = None, + ) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + version = f" --version-id {version_id}" if version_id else "" + legal_hold = json.dumps({"Status": legal_hold_status}) + cmd = ( + f"aws {self.common_flags} s3api put-object-legal-hold --bucket {bucket} --key {key} " + f"{version} --legal-hold '{legal_hold}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Put object tagging") + def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: + if bucket.startswith("-") or " " in bucket: + bucket = 
f'"{bucket}"' + + tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + tagging = {"TagSet": tags} + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} " + f"{version} --tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Get object tagging") + def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key {key} " + f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("TagSet") + + @reporter.step("Delete object tagging") + def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " + f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Sync directory S3") + def sync( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + if metadata: + cmd += " --metadata" + for key, value in metadata.items(): + cmd += f" {key}={value}" + if acl: + cmd += f" --acl {acl}" + output = self.local_shell.exec(cmd, command_options).stdout + return self._to_json(output) + + @reporter.step("CP directory S3") + def cp( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket} " + f"--endpoint-url {self.s3gate_endpoint} --recursive --profile {self.profile}" + ) + if metadata: + cmd += " --metadata" + for key, value in metadata.items(): + cmd += f" {key}={value}" + if acl: + cmd += f" --acl {acl}" + output = self.local_shell.exec(cmd, command_options).stdout + return self._to_json(output) + + @reporter.step("Create multipart upload S3") + def create_multipart_upload(self, bucket: str, key: str) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " + f"--key {key} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" + + return response["UploadId"] + + @reporter.step("List multipart uploads S3") + def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " + 
f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("Uploads") + + @reporter.step("Abort multipart upload S3") + def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} " + f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Upload part S3") + def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} " + f"--upload-id {upload_id} --part-number {part_num} --body {filepath} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd, command_options).stdout + response = self._to_json(output) + assert response.get("ETag"), f"Expected ETag in response:\n{response}" + return response["ETag"] + + @reporter.step("Upload copy part S3") + def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} " + f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd, command_options).stdout + response = self._to_json(output) + assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" + + return response["CopyPartResult"]["ETag"] + + @reporter.step("List parts S3") + def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " + f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Parts"), f"Expected Parts in response:\n{response}" + + return response["Parts"] + + @reporter.step("Complete multipart upload S3") + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + file_path = os.path.join(os.getcwd(), ASSETS_DIR, "parts.json") + parts_dict = {"Parts": [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]} + + with open(file_path, "w") as out_file: + out_file.write(json.dumps(parts_dict)) + + logger.info(f"Input file for complete-multipart-upload: {json.dumps(parts_dict)}") + + cmd = ( + f"aws {self.common_flags} s3api complete-multipart-upload --bucket {bucket} " + f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Put object lock configuration") + def put_object_lock_configuration(self, bucket: str, configuration: dict) -> 
dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " + f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + return self._to_json(output) + + @reporter.step("Get object lock configuration") + def get_object_lock_configuration(self, bucket: str): + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("ObjectLockConfiguration") + + @reporter.step("Put bucket lifecycle configuration") + def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api put-bucket-lifecycle-configuration --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint} --lifecycle-configuration file://{dumped_configuration} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + + @reporter.step("Get bucket lifecycle configuration") + def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api get-bucket-lifecycle-configuration --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + + @reporter.step("Delete bucket lifecycle configuration") + def delete_bucket_lifecycle(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api delete-bucket-lifecycle --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + + @staticmethod + def _to_json(output: str) -> dict: + json_output = {} + if "{" not in output and "}" not in output: + logger.warning(f"Could not parse json from output {output}") + return json_output + + json_output = json.loads(output[output.index("{") :]) + + return json_output + + @reporter.step("Create presign url for the object") + def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: + # AWS CLI does not support method definition and works only in 'get_object' state by default + cmd = f"aws {self.common_flags} s3 presign s3://{bucket}/{key} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + if expires_in: + cmd += f" --expires-in {expires_in}" + response = self.local_shell.exec(cmd).stdout + return response.strip() + + # IAM METHODS # + # Some methods don't have checks because AWS is silent in some cases (delete, attach, etc.) 
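# --- Editor's note (illustrative, not part of the patch): because several of the IAM calls
# below return an empty body, the wrapper cannot assert on the response itself, and tests are
# expected to verify the effect with a follow-up read. A minimal usage sketch, assuming an
# already-configured AwsCliClient instance named `client` and a hypothetical user "demo-user":
#
#   client.iam_delete_user("demo-user")                         # AWS replies with an empty body here
#   users = client.iam_list_users()                             # so read the state back...
#   names = [user["UserName"] for user in users["Users"]]
#   assert "demo-user" not in names                             # ...and assert the deletion took effect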
+ + @reporter.step("Adds the specified user to the specified group") + def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: + cmd = f"aws {self.common_flags} iam add-user-to-group --user-name {user_name} --group-name {group_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Attaches the specified managed policy to the specified IAM group") + def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: + cmd = f"aws {self.common_flags} iam attach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 14) + + return response + + @reporter.step("Attaches the specified managed policy to the specified user") + def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: + cmd = f"aws {self.common_flags} iam attach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 14) + + return response + + @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") + def iam_create_access_key(self, user_name: Optional[str] = None) -> dict: + cmd = f"aws {self.common_flags} iam create-access-key --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + if user_name: + cmd += f" --user-name {user_name}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + access_key_id = response["AccessKey"].get("AccessKeyId") + secret_access_key = response["AccessKey"].get("SecretAccessKey") + assert access_key_id, f"Expected AccessKeyId in response:\n{response}" + assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" + + return access_key_id, secret_access_key + + @reporter.step("Creates a new group") + def iam_create_group(self, group_name: str) -> dict: + cmd = f"aws {self.common_flags} iam create-group --group-name {group_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Group"), f"Expected Group in response:\n{response}" + assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" + + return response + + @reporter.step("Creates a new managed policy for your AWS account") + def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: + cmd = ( + f"aws {self.common_flags} iam create-policy --endpoint {self.iam_endpoint}" + f" --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Policy"), f"Expected Policy in response:\n{response}" + assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" + + return response + + @reporter.step("Creates a new IAM user for your AWS account") + def iam_create_user(self, user_name: str) -> dict: + cmd = f"aws 
{self.common_flags} iam create-user --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("User"), f"Expected User in response:\n{response}" + assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" + + return response + + @reporter.step("Deletes the access key pair associated with the specified IAM user") + def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: + cmd = f"aws {self.common_flags} iam delete-access-key --access-key-id {access_key_id} --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Deletes the specified IAM group") + def iam_delete_group(self, group_name: str) -> dict: + cmd = f"aws {self.common_flags} iam delete-group --group-name {group_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") + def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: + cmd = f"aws {self.common_flags} iam delete-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Deletes the specified managed policy") + def iam_delete_policy(self, policy_arn: str) -> dict: + cmd = f"aws {self.common_flags} iam delete-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Deletes the specified IAM user") + def iam_delete_user(self, user_name: str) -> dict: + cmd = f"aws {self.common_flags} iam delete-user --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") + def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: + cmd = f"aws {self.common_flags} iam delete-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Removes the specified managed policy from the specified IAM group") + def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: + cmd = f"aws {self.common_flags} iam detach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 14) + + return response + + @reporter.step("Removes the specified managed policy from the specified 
user") + def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: + cmd = f"aws {self.common_flags} iam detach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 14) + + return response + + @reporter.step("Returns a list of IAM users that are in the specified IAM group") + def iam_get_group(self, group_name: str) -> dict: + cmd = f"aws {self.common_flags} iam get-group --group-name {group_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "Users" in response.keys(), f"Expected Users in response:\n{response}" + assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}" + + return response + + @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") + def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: + cmd = f"aws {self.common_flags} iam get-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Retrieves information about the specified managed policy") + def iam_get_policy(self, policy_arn: str) -> dict: + cmd = f"aws {self.common_flags} iam get-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Policy"), f"Expected Policy in response:\n{response}" + assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" + + return response + + @reporter.step("Retrieves information about the specified version of the specified managed policy") + def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: + cmd = f"aws {self.common_flags} iam get-policy-version --policy-arn {policy_arn} --version-id {version_id} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}" + assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}" + + return response + + @reporter.step("Retrieves information about the specified IAM user") + def iam_get_user(self, user_name: str) -> dict: + cmd = f"aws {self.common_flags} iam get-user --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("User"), f"Expected User in response:\n{response}" + assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" + + return response + + @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") + def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: + cmd = f"aws {self.common_flags} iam get-user-policy 
--user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("UserName"), f"Expected User in response:\n{response}" + + return response + + @reporter.step("Returns information about the access key IDs associated with the specified IAM user") + def iam_list_access_keys(self, user_name: str) -> dict: + cmd = f"aws {self.common_flags} iam list-access-keys --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Lists all managed policies that are attached to the specified IAM group") + def iam_list_attached_group_policies(self, group_name: str) -> dict: + cmd = f"aws {self.common_flags} iam list-attached-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" + + return response + + @reporter.step("Lists all managed policies that are attached to the specified IAM user") + def iam_list_attached_user_policies(self, user_name: str) -> dict: + cmd = f"aws {self.common_flags} iam list-attached-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" + + return response + + @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") + def iam_list_entities_for_policy(self, policy_arn: str) -> dict: + cmd = f"aws {self.common_flags} iam list-entities-for-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" + assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" + + return response + + @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") + def iam_list_group_policies(self, group_name: str) -> dict: + cmd = f"aws {self.common_flags} iam list-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" + + return response + + @reporter.step("Lists the IAM groups") + def iam_list_groups(self) -> dict: + cmd = f"aws {self.common_flags} iam list-groups --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" + + return response + + @reporter.step("Lists the IAM groups that the specified IAM user belongs to") + def iam_list_groups_for_user(self, user_name: str) 
-> dict: + cmd = f"aws {self.common_flags} iam list-groups-for-user --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" + + return response + + @reporter.step("Lists all the managed policies that are available in your AWS account") + def iam_list_policies(self) -> dict: + cmd = f"aws {self.common_flags} iam list-policies --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "Policies" in response.keys(), f"Expected Policies in response:\n{response}" + + return response + + @reporter.step("Lists information about the versions of the specified managed policy") + def iam_list_policy_versions(self, policy_arn: str) -> dict: + cmd = f"aws {self.common_flags} iam list-policy-versions --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Versions"), f"Expected Versions in response:\n{response}" + + return response + + @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") + def iam_list_user_policies(self, user_name: str) -> dict: + cmd = f"aws {self.common_flags} iam list-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" + + return response + + @reporter.step("Lists the IAM users") + def iam_list_users(self) -> dict: + cmd = f"aws {self.common_flags} iam list-users --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "Users" in response.keys(), f"Expected Users in response:\n{response}" + + return response + + @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") + def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: + cmd = ( + f"aws {self.common_flags} iam put-group-policy --endpoint {self.iam_endpoint}" + f" --group-name {group_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 14) + + return response + + @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") + def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: + cmd = ( + f"aws {self.common_flags} iam put-user-policy --endpoint {self.iam_endpoint}" + f" --user-name {user_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 14) + + return response + + @reporter.step("Removes the specified user from the specified group") + def 
iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam remove-user-from-group --endpoint {self.iam_endpoint}" + f" --group-name {group_name} --user-name {user_name}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Updates the name and/or the path of the specified IAM group") + def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: + cmd = f"aws {self.common_flags} iam update-group --group-name {group_name} --endpoint {self.iam_endpoint}" + if new_name: + cmd += f" --new-group-name {new_name}" + if new_path: + cmd += f" --new-path {new_path}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Updates the name and/or the path of the specified IAM user") + def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: + cmd = f"aws {self.common_flags} iam update-user --user-name {user_name} --endpoint {self.iam_endpoint}" + if new_name: + cmd += f" --new-user-name {new_name}" + if new_path: + cmd += f" --new-path {new_path}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Adds one or more tags to an IAM user") + def iam_tag_user(self, user_name: str, tags: list) -> dict: + tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + cmd = ( + f"aws {self.common_flags} iam tag-user --user-name {user_name} --tags '{json.dumps(tags_json)}' --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("List tags of IAM user") + def iam_list_user_tags(self, user_name: str) -> dict: + cmd = f"aws {self.common_flags} iam list-user-tags --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Removes the specified tags from the user") + def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: + tag_keys_joined = " ".join(tag_keys) + cmd = f"aws {self.common_flags} iam untag-user --user-name {user_name} --tag-keys {tag_keys_joined} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + # MFA METHODS + @reporter.step("Creates a new virtual MFA device") + def iam_create_virtual_mfa_device(self, virtual_mfa_device_name: str, outfile: str, bootstrap_method: str) -> tuple: + cmd = f"aws {self.common_flags} iam create-virtual-mfa-device --virtual-mfa-device-name {virtual_mfa_device_name}\ + --outfile {outfile} --bootstrap-method {bootstrap_method} --endpoint {self.iam_endpoint}" + + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber") + assert serial_number, f"Expected SerialNumber in 
response:\n{response}" + + return serial_number, False + + @reporter.step("Deactivates the specified MFA device and removes it from association with the user name") + def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: + cmd = f"aws {self.common_flags} iam deactivate-mfa-device --user-name {user_name} --serial-number {serial_number} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Deletes a virtual MFA device") + def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: + cmd = f"aws {self.common_flags} iam delete-virtual-mfa-device --serial-number {serial_number} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Enables the specified MFA device and associates it with the specified IAM user") + def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: + cmd = f"aws {self.common_flags} iam enable-mfa-device --user-name {user_name} --serial-number {serial_number} --authentication-code1 {authentication_code1}\ + --authentication-code2 {authentication_code2} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Lists the MFA devices for an IAM user") + def iam_list_virtual_mfa_devices(self) -> dict: + cmd = f"aws {self.common_flags} iam list-virtual-mfa-devices --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}" + + return response + + @reporter.step("Get session token for user") + def sts_get_session_token( + self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None + ) -> tuple: + cmd = f"aws {self.common_flags} sts get-session-token --endpoint {self.iam_endpoint}" + if duration_seconds: + cmd += f" --duration-seconds {duration_seconds}" + if serial_number: + cmd += f" --serial-number {serial_number}" + if token_code: + cmd += f" --token-code {token_code}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + access_key = response.get("Credentials", {}).get("AccessKeyId") + secret_access_key = response.get("Credentials", {}).get("SecretAccessKey") + session_token = response.get("Credentials", {}).get("SessionToken") + assert access_key, f"Expected AccessKeyId in response:\n{response}" + assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" + assert session_token, f"Expected SessionToken in response:\n{response}" + + return access_key, secret_access_key, session_token diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py new file mode 100644 index 00000000..ac4d55b8 --- /dev/null +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -0,0 +1,1362 @@ +import json +import logging +import os +from collections.abc import Callable +from datetime import datetime +from time import 
sleep +from typing import Literal, Optional, Union + +import boto3 +import urllib3 +from botocore.config import Config +from botocore.exceptions import ClientError +from mypy_boto3_iam import IAMClient +from mypy_boto3_s3 import S3Client +from mypy_boto3_sts import STSClient + +from frostfs_testlib import reporter +from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict +from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME +from frostfs_testlib.utils import string_utils + +# TODO: Refactor this code to use shell instead of _cmd_run +from frostfs_testlib.utils.cli_utils import log_command_execution +from frostfs_testlib.utils.file_utils import TestFile + +logger = logging.getLogger("NeoLogger") + +# Disable warnings on self-signed certificate which the +# boto library produces on requests to S3-gate in dev-env +urllib3.disable_warnings() + + +class Boto3ClientWrapper(S3ClientWrapper): + __repr_name__: str = "Boto3 client" + + @reporter.step("Configure S3 client (boto3)") + def __init__( + self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" + ) -> None: + self.s3gate_endpoint: str = "" + self.boto3_client: S3Client = None + + self.iam_endpoint: str = "" + self.boto3_iam_client: IAMClient = None + self.boto3_sts_client: STSClient = None + + self.access_key_id = access_key_id + self.secret_access_key = secret_access_key + self.profile = profile + self.region = region + + self.session = boto3.Session() + self.config = Config( + signature_version="s3v4", + retries={ + "max_attempts": MAX_REQUEST_ATTEMPTS, + "mode": RETRY_MODE, + }, + ) + + self.set_endpoint(s3gate_endpoint) + + @reporter.step("Set endpoint S3 to {s3gate_endpoint}") + def set_endpoint(self, s3gate_endpoint: str): + if self.s3gate_endpoint == s3gate_endpoint: + return + + self.s3gate_endpoint = s3gate_endpoint + + self.boto3_client: S3Client = self.session.client( + service_name="s3", + aws_access_key_id=self.access_key_id, + aws_secret_access_key=self.secret_access_key, + region_name=self.region, + config=self.config, + endpoint_url=s3gate_endpoint, + verify=False, + ) + + @reporter.step("Set endpoint IAM to {iam_endpoint}") + def set_iam_endpoint(self, iam_endpoint: str): + if self.iam_endpoint == iam_endpoint: + return + + self.iam_endpoint = iam_endpoint + + self.boto3_iam_client = self.session.client( + service_name="iam", + aws_access_key_id=self.access_key_id, + aws_secret_access_key=self.secret_access_key, + region_name=self.region, + endpoint_url=self.iam_endpoint, + verify=False, + ) + # since the STS does not have an endpoint, IAM is used + self.boto3_sts_client = self.session.client( + service_name="sts", + aws_access_key_id=self.access_key_id, + aws_secret_access_key=self.secret_access_key, + endpoint_url=iam_endpoint, + region_name=self.region, + verify=False, + ) + + def _to_s3_param(self, param: str) -> str: + replacement_map = { + "Acl": "ACL", + "Cors": "CORS", + "_": "", + } + result = param.title() + for find, replace in replacement_map.items(): + result = result.replace(find, replace) + return result + + def _convert_to_s3_params(self, scope: dict, exclude: Optional[list[str]] = None) -> dict: + exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"] + return {self._to_s3_param(param): value for param, value in scope.items() if param not in exclude and value is not None} + + def _exec_request(self, method: Callable, params: 
Optional[dict] = None, **kwargs): + if not params: + params = {} + + try: + result = method(**params) + except ClientError as err: + log_command_execution(method.__name__, err.response, params, **kwargs) + raise + + log_command_execution(method.__name__, result, params, **kwargs) + return result + + # BUCKET METHODS # + @reporter.step("Create bucket S3") + def create_bucket( + self, + bucket: Optional[str] = None, + object_lock_enabled_for_bucket: Optional[bool] = None, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + grant_full_control: Optional[str] = None, + location_constraint: Optional[str] = None, + ) -> str: + if bucket is None: + bucket = string_utils.unique_name("bucket-") + + params = {"Bucket": bucket} + if object_lock_enabled_for_bucket is not None: + params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket}) + + if acl is not None: + params.update({"ACL": acl}) + elif grant_write or grant_read or grant_full_control: + if grant_write: + params.update({"GrantWrite": grant_write}) + elif grant_read: + params.update({"GrantRead": grant_read}) + elif grant_full_control: + params.update({"GrantFullControl": grant_full_control}) + + if location_constraint: + params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) + + self._exec_request(self.boto3_client.create_bucket, params, endpoint=self.s3gate_endpoint, profile=self.profile) + return bucket + + @reporter.step("List buckets S3") + def list_buckets(self) -> list[str]: + response = self._exec_request( + self.boto3_client.list_buckets, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return [bucket["Name"] for bucket in response["Buckets"]] + + @reporter.step("Delete bucket S3") + def delete_bucket(self, bucket: str) -> None: + self._exec_request( + self.boto3_client.delete_bucket, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Head bucket S3") + def head_bucket(self, bucket: str) -> None: + self._exec_request( + self.boto3_client.head_bucket, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put bucket versioning status") + def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: + params = {"Bucket": bucket, "VersioningConfiguration": {"Status": status.value}} + self._exec_request( + self.boto3_client.put_bucket_versioning, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Get bucket versioning status") + def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: + response = self._exec_request( + self.boto3_client.get_bucket_versioning, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("Status") + + @reporter.step("Put bucket tagging") + def put_bucket_tagging(self, bucket: str, tags: list) -> None: + tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + tagging = {"TagSet": tags} + params = self._convert_to_s3_params(locals(), exclude=["tags"]) + self._exec_request( + self.boto3_client.put_bucket_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Get bucket tagging") + def get_bucket_tagging(self, bucket: str) -> list: + response = self._exec_request( + self.boto3_client.get_bucket_tagging, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + 
profile=self.profile, + ) + return response.get("TagSet") + + @reporter.step("Get bucket acl") + def get_bucket_acl(self, bucket: str) -> dict: + return self._exec_request( + self.boto3_client.get_bucket_acl, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Delete bucket tagging") + def delete_bucket_tagging(self, bucket: str) -> None: + self._exec_request( + self.boto3_client.delete_bucket_tagging, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put bucket ACL") + def put_bucket_acl( + self, + bucket: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> None: + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.put_bucket_acl, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put object lock configuration") + def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: + params = {"Bucket": bucket, "ObjectLockConfiguration": configuration} + return self._exec_request( + self.boto3_client.put_object_lock_configuration, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Get object lock configuration") + def get_object_lock_configuration(self, bucket: str) -> dict: + response = self._exec_request( + self.boto3_client.get_object_lock_configuration, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("ObjectLockConfiguration") + + @reporter.step("Get bucket policy") + def get_bucket_policy(self, bucket: str) -> str: + response = self._exec_request( + self.boto3_client.get_bucket_policy, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("Policy") + + @reporter.step("Delete bucket policy") + def delete_bucket_policy(self, bucket: str) -> str: + return self._exec_request( + self.boto3_client.delete_bucket_policy, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put bucket policy") + def put_bucket_policy(self, bucket: str, policy: dict) -> None: + params = {"Bucket": bucket, "Policy": json.dumps(policy)} + return self._exec_request( + self.boto3_client.put_bucket_policy, + params, + # Overriding option for AWS CLI + policy=policy, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Get bucket cors") + def get_bucket_cors(self, bucket: str) -> dict: + response = self._exec_request( + self.boto3_client.get_bucket_cors, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("CORSRules") + + @reporter.step("Get bucket location") + def get_bucket_location(self, bucket: str) -> str: + response = self._exec_request( + self.boto3_client.get_bucket_location, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("LocationConstraint") + + @reporter.step("Put bucket cors") + def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_client.put_bucket_cors, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Delete bucket cors") + def delete_bucket_cors(self, bucket: str) -> None: + self._exec_request( + 
self.boto3_client.delete_bucket_cors, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put bucket lifecycle configuration") + def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + params = self._convert_to_s3_params(locals(), exclude=["dumped_configuration"]) + return self._exec_request( + self.boto3_client.put_bucket_lifecycle_configuration, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Get bucket lifecycle configuration") + def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + response = self._exec_request( + self.boto3_client.get_bucket_lifecycle_configuration, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return {"Rules": response.get("Rules")} + + @reporter.step("Delete bucket lifecycle configuration") + def delete_bucket_lifecycle(self, bucket: str) -> dict: + return self._exec_request( + self.boto3_client.delete_bucket_lifecycle, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + # END OF BUCKET METHODS # + # OBJECT METHODS # + + @reporter.step("List objects S3 v2") + def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + response = self._exec_request( + self.boto3_client.list_objects_v2, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + obj_list = [obj["Key"] for obj in response.get("Contents", [])] + logger.info(f"Found s3 objects: {obj_list}") + return response if full_output else obj_list + + @reporter.step("List objects S3") + def list_objects( + self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None + ) -> Union[dict, list[str]]: + params = {"Bucket": bucket} + if page_size: + params["MaxKeys"] = page_size + if prefix: + params["Prefix"] = prefix + response = self._exec_request( + self.boto3_client.list_objects, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + obj_list = [obj["Key"] for obj in response.get("Contents", [])] + logger.info(f"Found s3 objects: {obj_list}") + return response if full_output else obj_list + + @reporter.step("List objects versions S3") + def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: + response = self._exec_request( + self.boto3_client.list_object_versions, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response if full_output else response.get("Versions", []) + + @reporter.step("List objects delete markers S3") + def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: + response = self._exec_request( + self.boto3_client.list_object_versions, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response if full_output else response.get("DeleteMarkers", []) + + @reporter.step("Put object S3") + def put_object( + self, + bucket: str, + filepath: str, + key: Optional[str] = None, + metadata: Optional[dict] = None, + tagging: Optional[str] = None, + acl: Optional[str] = None, + object_lock_mode: Optional[str] = None, + object_lock_retain_until_date: Optional[datetime] = None, + object_lock_legal_hold_status: Optional[str] = None, + grant_full_control: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> str: + if key is None: + key = 
os.path.basename(filepath) + + with open(filepath, "rb") as put_file: + body = put_file.read() + + params = self._convert_to_s3_params(locals(), exclude=["filepath", "put_file"]) + response = self._exec_request( + self.boto3_client.put_object, + params, + body=filepath, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("VersionId") + + @reporter.step("Head object S3") + def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_client.head_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Delete object S3") + def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_client.delete_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Delete objects S3") + def delete_objects(self, bucket: str, keys: list[str]) -> dict: + params = {"Bucket": bucket, "Delete": _make_objs_dict(keys)} + response = self._exec_request( + self.boto3_client.delete_objects, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + assert ( + "Errors" not in response + ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {[err_info["Message"] for err_info in response["Errors"]]}' + + return response + + @reporter.step("Delete object versions S3") + def delete_object_versions(self, bucket: str, object_versions: list) -> dict: + # Build deletion list in S3 format + delete_list = { + "Objects": [ + { + "Key": object_version["Key"], + "VersionId": object_version["VersionId"], + } + for object_version in object_versions + ] + } + params = {"Bucket": bucket, "Delete": delete_list} + return self._exec_request( + self.boto3_client.delete_objects, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Delete object versions S3 without delete markers") + def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: + # Delete objects without creating delete markers + for object_version in object_versions: + params = {"Bucket": bucket, "Key": object_version["Key"], "VersionId": object_version["VersionId"]} + self._exec_request( + self.boto3_client.delete_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put object ACL") + def put_object_acl( + self, + bucket: str, + key: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> list: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.put_object_acl, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("Grants") + + @reporter.step("Get object ACL") + def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.get_object_acl, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("Grants") + + @reporter.step("Copy object S3") + def copy_object( + self, + source_bucket: str, + source_key: str, + bucket: Optional[str] = None, + key: Optional[str] = None, + acl: Optional[str] = None, + metadata_directive: Optional[Literal["COPY", "REPLACE"]] =
None, + metadata: Optional[dict] = None, + tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None, + tagging: Optional[str] = None, + ) -> str: + if bucket is None: + bucket = source_bucket + + if key is None: + key = string_utils.unique_name("copy-object-") + + copy_source = f"{source_bucket}/{source_key}" + params = self._convert_to_s3_params(locals(), exclude=["source_bucket", "source_key"]) + + self._exec_request( + self.boto3_client.copy_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return key + + @reporter.step("Get object S3") + def get_object( + self, + bucket: str, + key: str, + version_id: Optional[str] = None, + object_range: Optional[tuple[int, int]] = None, + full_output: bool = False, + ) -> dict | TestFile: + range_str = None + if object_range: + range_str = f"bytes={object_range[0]}-{object_range[1]}" + + params = locals() + params.update({"Range": f"bytes={object_range[0]}-{object_range[1]}"} if object_range else {}) + params = self._convert_to_s3_params(params, exclude=["object_range", "full_output", "range_str"]) + response = self._exec_request( + self.boto3_client.get_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + if full_output: + return response + + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-"))) + with open(test_file, "wb") as file: + chunk = response["Body"].read(1024) + while chunk: + file.write(chunk) + chunk = response["Body"].read(1024) + return test_file + + @reporter.step("Create multipart upload S3") + def create_multipart_upload(self, bucket: str, key: str) -> str: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.create_multipart_upload, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" + return response["UploadId"] + + @reporter.step("List multipart uploads S3") + def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: + response = self._exec_request( + self.boto3_client.list_multipart_uploads, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("Uploads") + + @reporter.step("Abort multipart upload S3") + def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.abort_multipart_upload, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Upload part S3") + def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: + with open(filepath, "rb") as put_file: + body = put_file.read() + + params = self._convert_to_s3_params(locals(), exclude=["put_file", "part_num", "filepath"]) + params["PartNumber"] = part_num + + response = self._exec_request( + self.boto3_client.upload_part, + params, + body=filepath, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + assert response.get("ETag"), f"Expected ETag in response:\n{response}" + return response["ETag"] + + @reporter.step("Upload copy part S3") + def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: + params = self._convert_to_s3_params(locals(), exclude=["put_file", "part_num", "filepath"]) + params["PartNumber"] = part_num + response = self._exec_request( + self.boto3_client.upload_part_copy, + params, + 
endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" + return response["CopyPartResult"]["ETag"] + + @reporter.step("List parts S3") + def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.list_parts, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + assert response.get("Parts"), f"Expected Parts in response:\n{response}" + return response["Parts"] + + @reporter.step("Complete multipart upload S3") + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: + parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] + params = self._convert_to_s3_params(locals(), exclude=["parts"]) + params["MultipartUpload"] = {"Parts": parts} + return self._exec_request( + self.boto3_client.complete_multipart_upload, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put object retention") + def put_object_retention( + self, + bucket: str, + key: str, + retention: dict, + version_id: Optional[str] = None, + bypass_governance_retention: Optional[bool] = None, + ) -> None: + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.put_object_retention, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put object legal hold") + def put_object_legal_hold( + self, + bucket: str, + key: str, + legal_hold_status: Literal["ON", "OFF"], + version_id: Optional[str] = None, + ) -> None: + legal_hold = {"Status": legal_hold_status} + params = self._convert_to_s3_params(locals(), exclude=["legal_hold_status"]) + self._exec_request( + self.boto3_client.put_object_legal_hold, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put object tagging") + def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: + tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + tagging = {"TagSet": tags} + params = self._convert_to_s3_params(locals(), exclude=["tags"]) + self._exec_request( + self.boto3_client.put_object_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Get object tagging") + def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.get_object_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("TagSet") + + @reporter.step("Delete object tagging") + def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.delete_object_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Get object attributes") + def get_object_attributes( + self, + bucket: str, + key: str, + attributes: list[str], + version_id: Optional[str] = None, + max_parts: Optional[int] = None, + part_number: Optional[int] = None, + full_output: bool = True, + ) -> dict: + logger.warning("Method get_object_attributes is not supported by boto3 client") + return {} + + @reporter.step("Sync directory S3") + def sync( + self, + bucket: 
str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + raise NotImplementedError("Sync is not supported for boto3 client") + + @reporter.step("CP directory S3") + def cp( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + raise NotImplementedError("Cp is not supported for boto3 client") + + @reporter.step("Create presign url for the object") + def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: + response = self._exec_request( + method=self.boto3_client.generate_presigned_url, + params={"ClientMethod": method, "Params": {"Bucket": bucket, "Key": key}, "ExpiresIn": expires_in}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response + + # END OBJECT METHODS # + + # IAM METHODS # + # Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.) + + @reporter.step("Adds the specified user to the specified group") + def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.add_user_to_group, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Attaches the specified managed policy to the specified IAM group") + def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.attach_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + sleep(S3_SYNC_WAIT_TIME * 14) + return response + + @reporter.step("Attaches the specified managed policy to the specified user") + def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.attach_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + sleep(S3_SYNC_WAIT_TIME * 14) + return response + + @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") + def iam_create_access_key(self, user_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.create_access_key, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + access_key_id = response["AccessKey"].get("AccessKeyId") + secret_access_key = response["AccessKey"].get("SecretAccessKey") + assert access_key_id, f"Expected AccessKeyId in response:\n{response}" + assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" + + return access_key_id, secret_access_key + + @reporter.step("Creates a new group") + def iam_create_group(self, group_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.create_group, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + assert response.get("Group"), f"Expected Group in response:\n{response}" + assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" + + return response + + @reporter.step("Creates a new managed policy for your AWS account") + def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: + params = self._convert_to_s3_params(locals()) + params["PolicyDocument"] = json.dumps(policy_document) + response = self._exec_request( + 
self.boto3_iam_client.create_policy, + params, + # Overriding option for AWS CLI + policy_document=policy_document, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + assert response.get("Policy"), f"Expected Policy in response:\n{response}" + assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" + + return response + + @reporter.step("Creates a new IAM user for your AWS account") + def iam_create_user(self, user_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.create_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + assert response.get("User"), f"Expected User in response:\n{response}" + assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" + + return response + + @reporter.step("Deletes the access key pair associated with the specified IAM user") + def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.delete_access_key, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Deletes the specified IAM group") + def iam_delete_group(self, group_name: str) -> dict: + return self._exec_request( + self.boto3_iam_client.delete_group, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") + def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.delete_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Deletes the specified managed policy") + def iam_delete_policy(self, policy_arn: str) -> dict: + return self._exec_request( + self.boto3_iam_client.delete_policy, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Deletes the specified IAM user") + def iam_delete_user(self, user_name: str) -> dict: + return self._exec_request( + self.boto3_iam_client.delete_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") + def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.delete_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Removes the specified managed policy from the specified IAM group") + def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.detach_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + sleep(S3_SYNC_WAIT_TIME * 14) + return response + + @reporter.step("Removes the specified managed policy from the specified user") + def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.detach_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + sleep(S3_SYNC_WAIT_TIME 
* 14) + return response + + @reporter.step("Returns a list of IAM users that are in the specified IAM group") + def iam_get_group(self, group_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.get_group, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}" + return response + + @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") + def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.get_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Retrieves information about the specified managed policy") + def iam_get_policy(self, policy_arn: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.get_policy, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + assert response.get("Policy"), f"Expected Policy in response:\n{response}" + assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" + + return response + + @reporter.step("Retrieves information about the specified version of the specified managed policy") + def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.get_policy_version, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}" + assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}" + + return response + + @reporter.step("Retrieves information about the specified IAM user") + def iam_get_user(self, user_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.get_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + assert response.get("User"), f"Expected User in response:\n{response}" + assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" + + return response + + @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") + def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.get_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert response.get("UserName"), f"Expected UserName in response:\n{response}" + return response + + @reporter.step("Returns information about the access key IDs associated with the specified IAM user") + def iam_list_access_keys(self, user_name: str) -> dict: + return self._exec_request( + self.boto3_iam_client.list_access_keys, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Lists all managed policies that are attached to the specified IAM group") + def iam_list_attached_group_policies(self, group_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_attached_group_policies, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert 
"AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" + return response + + @reporter.step("Lists all managed policies that are attached to the specified IAM user") + def iam_list_attached_user_policies(self, user_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_attached_user_policies, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" + return response + + @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") + def iam_list_entities_for_policy(self, policy_arn: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_entities_for_policy, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" + assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" + + return response + + @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") + def iam_list_group_policies(self, group_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_group_policies, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" + return response + + @reporter.step("Lists the IAM groups") + def iam_list_groups(self) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_groups, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" + return response + + @reporter.step("Lists the IAM groups that the specified IAM user belongs to") + def iam_list_groups_for_user(self, user_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_groups_for_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" + return response + + @reporter.step("Lists all the managed policies that are available in your AWS account") + def iam_list_policies(self) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_policies, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert response.get("Policies"), f"Expected Policies in response:\n{response}" + return response + + @reporter.step("Lists information about the versions of the specified managed policy") + def iam_list_policy_versions(self, policy_arn: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_policy_versions, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert response.get("Versions"), f"Expected Versions in response:\n{response}" + return response + + @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") + def iam_list_user_policies(self, user_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_user_policies, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" + return response + + @reporter.step("Lists the IAM 
users") + def iam_list_users(self) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_users, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert response.get("Users"), f"Expected Users in response:\n{response}" + return response + + @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") + def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: + params = self._convert_to_s3_params(locals()) + params["PolicyDocument"] = json.dumps(policy_document) + response = self._exec_request( + self.boto3_iam_client.put_group_policy, + params, + # Overriding option for AWS CLI + policy_document=policy_document, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + sleep(S3_SYNC_WAIT_TIME * 14) + return response + + @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") + def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: + params = self._convert_to_s3_params(locals()) + params["PolicyDocument"] = json.dumps(policy_document) + response = self._exec_request( + self.boto3_iam_client.put_user_policy, + params, + # Overriding option for AWS CLI + policy_document=policy_document, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + sleep(S3_SYNC_WAIT_TIME * 14) + return response + + @reporter.step("Removes the specified user from the specified group") + def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.remove_user_from_group, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Updates the name and/or the path of the specified IAM group") + def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict: + params = {"GroupName": group_name, "NewGroupName": new_name, "NewPath": "/"} + return self._exec_request( + self.boto3_iam_client.update_group, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Updates the name and/or the path of the specified IAM user") + def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict: + params = {"UserName": user_name, "NewUserName": new_name, "NewPath": "/"} + return self._exec_request( + self.boto3_iam_client.update_user, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Adds one or more tags to an IAM user") + def iam_tag_user(self, user_name: str, tags: list) -> dict: + params = self._convert_to_s3_params(locals()) + params["Tags"] = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + return self._exec_request( + self.boto3_iam_client.tag_user, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("List tags of IAM user") + def iam_list_user_tags(self, user_name: str) -> dict: + return self._exec_request( + self.boto3_iam_client.list_user_tags, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Removes the specified tags from the user") + def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.untag_user, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + # MFA methods + @reporter.step("Creates a 
new virtual MFA device") + def iam_create_virtual_mfa_device( + self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None + ) -> tuple: + response = self.boto3_iam_client.create_virtual_mfa_device(VirtualMFADeviceName=virtual_mfa_device_name) + + serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber") + base32StringSeed = response.get("VirtualMFADevice", {}).get("Base32StringSeed") + assert serial_number, f"Expected SerialNumber in response:\n{response}" + assert base32StringSeed, f"Expected Base32StringSeed in response:\n{response}" + + return serial_number, base32StringSeed + + @reporter.step("Deactivates the specified MFA device and removes it from association with the user name") + def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: + response = self.boto3_iam_client.deactivate_mfa_device(UserName=user_name, SerialNumber=serial_number) + + return response + + @reporter.step("Deletes a virtual MFA device") + def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: + response = self.boto3_iam_client.delete_virtual_mfa_device(SerialNumber=serial_number) + + return response + + @reporter.step("Enables the specified MFA device and associates it with the specified IAM user") + def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: + response = self.boto3_iam_client.enable_mfa_device( + UserName=user_name, + SerialNumber=serial_number, + AuthenticationCode1=authentication_code1, + AuthenticationCode2=authentication_code2, + ) + + return response + + @reporter.step("Lists the MFA devices for an IAM user") + def iam_list_virtual_mfa_devices(self) -> dict: + response = self.boto3_iam_client.list_virtual_mfa_devices() + assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}" + + return response + + @reporter.step("Get session token for user") + def sts_get_session_token( + self, duration_seconds: Optional[str] = "", serial_number: Optional[str] = "", token_code: Optional[str] = "" + ) -> tuple: + response = self.boto3_sts_client.get_session_token( + DurationSeconds=duration_seconds, + SerialNumber=serial_number, + TokenCode=token_code, + ) + + access_key = response.get("Credentials", {}).get("AccessKeyId") + secret_access_key = response.get("Credentials", {}).get("SecretAccessKey") + session_token = response.get("Credentials", {}).get("SessionToken") + assert access_key, f"Expected AccessKeyId in response:\n{response}" + assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" + assert session_token, f"Expected SessionToken in response:\n{response}" + + return access_key, secret_access_key, session_token diff --git a/src/frostfs_testlib/s3/curl_bucket_resolver.py b/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py similarity index 88% rename from src/frostfs_testlib/s3/curl_bucket_resolver.py rename to src/frostfs_testlib/clients/s3/curl_bucket_resolver.py index b713e792..4d845cf0 100644 --- a/src/frostfs_testlib/s3/curl_bucket_resolver.py +++ b/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py @@ -1,7 +1,7 @@ import re from frostfs_testlib.cli.generic_cli import GenericCli -from frostfs_testlib.s3.interfaces import BucketContainerResolver +from frostfs_testlib.clients.s3 import BucketContainerResolver from frostfs_testlib.storage.cluster import ClusterNode diff --git a/src/frostfs_testlib/s3/interfaces.py 
b/src/frostfs_testlib/clients/s3/interfaces.py similarity index 60% rename from src/frostfs_testlib/s3/interfaces.py rename to src/frostfs_testlib/clients/s3/interfaces.py index b6a10e37..0d03a287 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/clients/s3/interfaces.py @@ -4,6 +4,7 @@ from typing import Literal, Optional, Union from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum +from frostfs_testlib.utils.file_utils import TestFile def _make_objs_dict(key_names): @@ -21,15 +22,15 @@ class VersioningStatus(HumanReadableEnum): SUSPENDED = "Suspended" -ACL_COPY = [ - "private", - "public-read", - "public-read-write", - "authenticated-read", - "aws-exec-read", - "bucket-owner-read", - "bucket-owner-full-control", -] +class ACL: + PRIVATE = "private" + PUBLIC_READ = "public-read" + PUBLIC_READ_WRITE = "public-read-write" + AUTHENTICATED_READ = "authenticated-read" + AWS_EXEC_READ = "aws-exec-read" + BUCKET_OWNER_READ = "bucket-owner-read" + BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control" + LOG_DELIVERY_WRITE = "log-delivery-write" class BucketContainerResolver(ABC): @@ -49,14 +50,26 @@ class BucketContainerResolver(ABC): class S3ClientWrapper(HumanReadableABC): + access_key_id: str + secret_access_key: str + profile: str + region: str + + s3gate_endpoint: str + iam_endpoint: str + @abstractmethod - def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str) -> None: + def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None: pass @abstractmethod def set_endpoint(self, s3gate_endpoint: str): """Set endpoint""" + @abstractmethod + def set_iam_endpoint(self, iam_endpoint: str): + """Set iam endpoint""" + @abstractmethod def create_bucket( self, @@ -123,7 +136,7 @@ class S3ClientWrapper(HumanReadableABC): """Deletes the tags from the bucket.""" @abstractmethod - def get_bucket_acl(self, bucket: str) -> list: + def get_bucket_acl(self, bucket: str) -> dict: """This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket.""" @abstractmethod @@ -152,6 +165,10 @@ class S3ClientWrapper(HumanReadableABC): def get_bucket_policy(self, bucket: str) -> str: """Returns the policy of a specified bucket.""" + @abstractmethod + def delete_bucket_policy(self, bucket: str) -> str: + """Deletes the policy of a specified bucket.""" + @abstractmethod def put_bucket_policy(self, bucket: str, policy: dict) -> None: """Applies S3 bucket policy to an S3 bucket.""" @@ -186,7 +203,9 @@ class S3ClientWrapper(HumanReadableABC): """ @abstractmethod - def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + def list_objects( + self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None + ) -> Union[dict, list[str]]: """Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. 
Make sure to design your application @@ -285,7 +304,7 @@ class S3ClientWrapper(HumanReadableABC): version_id: Optional[str] = None, object_range: Optional[tuple[int, int]] = None, full_output: bool = False, - ) -> Union[dict, str]: + ) -> dict | TestFile: """Retrieves objects from S3.""" @abstractmethod @@ -325,7 +344,7 @@ class S3ClientWrapper(HumanReadableABC): """Lists the parts that have been uploaded for a specific multipart upload.""" @abstractmethod - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: """Completes a multipart upload by assembling previously uploaded parts.""" @abstractmethod @@ -358,9 +377,21 @@ class S3ClientWrapper(HumanReadableABC): """Returns the tag-set of an object.""" @abstractmethod - def delete_object_tagging(self, bucket: str, key: str) -> None: + def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: """Removes the entire tag set from the specified object.""" + @abstractmethod + def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + """Adds or updates bucket lifecycle configuration""" + + @abstractmethod + def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + """Gets bucket lifecycle configuration""" + + @abstractmethod + def delete_bucket_lifecycle(self, bucket: str) -> dict: + """Deletes bucket lifecycle""" + @abstractmethod def get_object_attributes( self, @@ -394,4 +425,199 @@ class S3ClientWrapper(HumanReadableABC): ) -> dict: """cp directory TODO: Add proper description""" + @abstractmethod + def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: + """Creates presign URL""" + # END OF OBJECT METHODS # + + # IAM METHODS # + + @abstractmethod + def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: + """Adds the specified user to the specified group""" + + @abstractmethod + def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: + """Attaches the specified managed policy to the specified IAM group""" + + @abstractmethod + def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: + """Attaches the specified managed policy to the specified user""" + + @abstractmethod + def iam_create_access_key(self, user_name: str) -> dict: + """Creates a new AWS secret access key and access key ID for the specified user""" + + @abstractmethod + def iam_create_group(self, group_name: str) -> dict: + """Creates a new group""" + + @abstractmethod + def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: + """Creates a new managed policy for your AWS account""" + + @abstractmethod + def iam_create_user(self, user_name: str) -> dict: + """Creates a new IAM user for your AWS account""" + + @abstractmethod + def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: + """Deletes the access key pair associated with the specified IAM user""" + + @abstractmethod + def iam_delete_group(self, group_name: str) -> dict: + """Deletes the specified IAM group""" + + @abstractmethod + def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: + """Deletes the specified inline policy that is embedded in the specified IAM group""" + + @abstractmethod + def iam_delete_policy(self, policy_arn: str) -> dict: + """Deletes the specified managed policy""" + + 
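These IAM abstractions are implemented by both the AWS CLI and boto3 wrappers in this change; the minimal sketch below shows how a test might drive them through the Boto3ClientWrapper added above. The endpoints, user/policy names, and policy document are placeholders for illustration and are not part of this patch.

from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper

# Placeholder credentials and endpoints, assuming a locally reachable S3 gate and IAM gate.
client = Boto3ClientWrapper("<access-key-id>", "<secret-access-key>", "http://s3gate.example:8080")
client.set_iam_endpoint("http://iam.example:8080")

# Example permissive policy document used only as a sample payload.
policy_document = {"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": "s3:*", "Resource": "*"}]}

# Create a user, attach a managed policy, then clean up via the same interface methods.
client.iam_create_user("demo-user")
# Assumes the gateway returns the standard Policy.Arn field in the create-policy response.
policy_arn = client.iam_create_policy("demo-policy", policy_document)["Policy"]["Arn"]
client.iam_attach_user_policy("demo-user", policy_arn)

client.iam_detach_user_policy("demo-user", policy_arn)
client.iam_delete_policy(policy_arn)
client.iam_delete_user("demo-user")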
@abstractmethod + def iam_delete_user(self, user_name: str) -> dict: + """Deletes the specified IAM user""" + + @abstractmethod + def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: + """Deletes the specified inline policy that is embedded in the specified IAM user""" + + @abstractmethod + def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: + """Removes the specified managed policy from the specified IAM group""" + + @abstractmethod + def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: + """Removes the specified managed policy from the specified user""" + + @abstractmethod + def iam_get_group(self, group_name: str) -> dict: + """Returns a list of IAM users that are in the specified IAM group""" + + @abstractmethod + def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: + """Retrieves the specified inline policy document that is embedded in the specified IAM group""" + + @abstractmethod + def iam_get_policy(self, policy_arn: str) -> dict: + """Retrieves information about the specified managed policy""" + + @abstractmethod + def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: + """Retrieves information about the specified version of the specified managed policy""" + + @abstractmethod + def iam_get_user(self, user_name: str) -> dict: + """Retrieves information about the specified IAM user""" + + @abstractmethod + def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: + """Retrieves the specified inline policy document that is embedded in the specified IAM user""" + + @abstractmethod + def iam_list_access_keys(self, user_name: str) -> dict: + """Returns information about the access key IDs associated with the specified IAM user""" + + @abstractmethod + def iam_list_attached_group_policies(self, group_name: str) -> dict: + """Lists all managed policies that are attached to the specified IAM group""" + + @abstractmethod + def iam_list_attached_user_policies(self, user_name: str) -> dict: + """Lists all managed policies that are attached to the specified IAM user""" + + @abstractmethod + def iam_list_entities_for_policy(self, policy_arn: str) -> dict: + """Lists all IAM users, groups, and roles that the specified managed policy is attached to""" + + @abstractmethod + def iam_list_group_policies(self, group_name: str) -> dict: + """Lists the names of the inline policies that are embedded in the specified IAM group""" + + @abstractmethod + def iam_list_groups(self) -> dict: + """Lists the IAM groups""" + + @abstractmethod + def iam_list_groups_for_user(self, user_name: str) -> dict: + """Lists the IAM groups that the specified IAM user belongs to""" + + @abstractmethod + def iam_list_policies(self) -> dict: + """Lists all the managed policies that are available in your AWS account""" + + @abstractmethod + def iam_list_policy_versions(self, policy_arn: str) -> dict: + """Lists information about the versions of the specified managed policy""" + + @abstractmethod + def iam_list_user_policies(self, user_name: str) -> dict: + """Lists the names of the inline policies embedded in the specified IAM user""" + + @abstractmethod + def iam_list_users(self) -> dict: + """Lists the IAM users""" + + @abstractmethod + def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: + """Adds or updates an inline policy document that is embedded in the specified IAM group""" + + @abstractmethod + def iam_put_user_policy(self, user_name: str, 
policy_name: str, policy_document: dict) -> dict: + """Adds or updates an inline policy document that is embedded in the specified IAM user""" + + @abstractmethod + def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: + """Removes the specified user from the specified group""" + + @abstractmethod + def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: + """Updates the name and/or the path of the specified IAM group""" + + @abstractmethod + def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: + """Updates the name and/or the path of the specified IAM user""" + + @abstractmethod + def iam_tag_user(self, user_name: str, tags: list) -> dict: + """Adds one or more tags to an IAM user""" + + @abstractmethod + def iam_list_user_tags(self, user_name: str) -> dict: + """List tags of IAM user""" + + @abstractmethod + def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: + """Removes the specified tags from the user""" + + # MFA methods + @abstractmethod + def iam_create_virtual_mfa_device( + self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None + ) -> tuple: + """Creates a new virtual MFA device""" + + @abstractmethod + def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: + """Deactivates the specified MFA device and removes it from association with the user name""" + + @abstractmethod + def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: + """Deletes a virtual MFA device""" + + @abstractmethod + def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: + """Enables the specified MFA device and associates it with the specified IAM user""" + + @abstractmethod + def iam_list_virtual_mfa_devices(self) -> dict: + """Lists the MFA devices for an IAM user""" + + @abstractmethod + def sts_get_session_token( + self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None + ) -> tuple: + """Get session token for user""" diff --git a/src/frostfs_testlib/clients/s3/s3_http_client.py b/src/frostfs_testlib/clients/s3/s3_http_client.py new file mode 100644 index 00000000..f6f423d2 --- /dev/null +++ b/src/frostfs_testlib/clients/s3/s3_http_client.py @@ -0,0 +1,149 @@ +import hashlib +import logging +import xml.etree.ElementTree as ET + +import httpx +from botocore.auth import SigV4Auth +from botocore.awsrequest import AWSRequest +from botocore.credentials import Credentials + +from frostfs_testlib import reporter +from frostfs_testlib.clients import HttpClient +from frostfs_testlib.utils.file_utils import TestFile + +logger = logging.getLogger("NeoLogger") + +DEFAULT_TIMEOUT = 60.0 + + +class S3HttpClient: + def __init__( + self, s3gate_endpoint: str, access_key_id: str, secret_access_key: str, profile: str = "default", region: str = "us-east-1" + ) -> None: + self.http_client = HttpClient() + self.credentials = Credentials(access_key_id, secret_access_key) + self.profile = profile + self.region = region + + self.iam_endpoint: str = None + self.s3gate_endpoint: str = None + self.service: str = None + self.signature: SigV4Auth = None + + self.set_endpoint(s3gate_endpoint) + + def _to_s3_header(self, header: str) -> dict: + replacement_map = { + "Acl": "ACL", + "_": "-", + } + + result = header + if not header.startswith("x_amz"): + 
result = header.title() + + for find, replace in replacement_map.items(): + result = result.replace(find, replace) + + return result + + def _convert_to_s3_headers(self, scope: dict, exclude: list[str] = None): + exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"] + return {self._to_s3_header(header): value for header, value in scope.items() if header not in exclude and value is not None} + + def _create_aws_request( + self, method: str, url: str, headers: dict, content: str | bytes | TestFile = None, params: dict = None + ) -> AWSRequest: + data = b"" + + if content is not None: + if isinstance(content, TestFile): + with open(content, "rb") as io_content: + data = io_content.read() + elif isinstance(content, str): + data = bytes(content, encoding="utf-8") + elif isinstance(content, bytes): + data = content + else: + raise TypeError(f"Content expected as a string, bytes or TestFile object, got: {content}") + + headers["X-Amz-Content-SHA256"] = hashlib.sha256(data).hexdigest() + aws_request = AWSRequest(method, url, headers, data, params) + self.signature.add_auth(aws_request) + + return aws_request + + def _exec_request( + self, + method: str, + url: str, + headers: dict, + content: str | bytes | TestFile = None, + params: dict = None, + timeout: float = DEFAULT_TIMEOUT, + ) -> dict: + aws_request = self._create_aws_request(method, url, headers, content, params) + response = self.http_client.send( + aws_request.method, + aws_request.url, + headers=dict(aws_request.headers), + data=aws_request.data, + params=aws_request.params, + timeout=timeout, + ) + + try: + response.raise_for_status() + except httpx.HTTPStatusError: + raise httpx.HTTPStatusError(response.text, request=response.request, response=response) + + root = ET.fromstring(response.read()) + data = { + "LastModified": root.find(".//LastModified").text, + "ETag": root.find(".//ETag").text, + } + + if response.headers.get("x-amz-version-id"): + data["VersionId"] = response.headers.get("x-amz-version-id") + + return data + + @reporter.step("Set endpoint S3 to {s3gate_endpoint}") + def set_endpoint(self, s3gate_endpoint: str): + if self.s3gate_endpoint == s3gate_endpoint: + return + + self.s3gate_endpoint = s3gate_endpoint + self.service = "s3" + self.signature = SigV4Auth(self.credentials, self.service, self.region) + + @reporter.step("Set endpoint IAM to {iam_endpoint}") + def set_iam_endpoint(self, iam_endpoint: str): + if self.iam_endpoint == iam_endpoint: + return + + self.iam_endpoint = iam_endpoint + self.service = "iam" + self.signature = SigV4Auth(self.credentials, self.service, self.region) + + @reporter.step("Patch object S3") + def patch_object( + self, + bucket: str, + key: str, + content: str | bytes | TestFile, + content_range: str, + version_id: str = None, + if_match: str = None, + if_unmodified_since: str = None, + x_amz_expected_bucket_owner: str = None, + timeout: float = DEFAULT_TIMEOUT, + ) -> dict: + if content_range and not content_range.startswith("bytes"): + content_range = f"bytes {content_range}/*" + + url = f"{self.s3gate_endpoint}/{bucket}/{key}" + headers = self._convert_to_s3_headers(locals(), exclude=["bucket", "key", "content", "version_id", "timeout"]) + params = {"VersionId": version_id} if version_id is not None else None + + return self._exec_request("PATCH", url, headers, content, params, timeout=timeout) diff --git a/src/frostfs_testlib/credentials/authmate_s3_provider.py b/src/frostfs_testlib/credentials/authmate_s3_provider.py index 66c50157..ed6454bc 100644 --- 
a/src/frostfs_testlib/credentials/authmate_s3_provider.py +++ b/src/frostfs_testlib/credentials/authmate_s3_provider.py @@ -1,5 +1,4 @@ import re -from datetime import datetime from typing import Optional from frostfs_testlib import reporter @@ -10,6 +9,7 @@ from frostfs_testlib.shell import LocalShell from frostfs_testlib.steps.cli.container import list_containers from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate +from frostfs_testlib.utils import string_utils class AuthmateS3CredentialsProvider(S3CredentialsProvider): @@ -22,7 +22,7 @@ class AuthmateS3CredentialsProvider(S3CredentialsProvider): gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes] # unique short bucket name - bucket = f"bucket-{hex(int(datetime.now().timestamp()*1000000))}" + bucket = string_utils.unique_name("bucket-") frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC) issue_secret_output = frostfs_authmate.secret.issue( diff --git a/src/frostfs_testlib/credentials/interfaces.py b/src/frostfs_testlib/credentials/interfaces.py index c863da0d..b2ae6f18 100644 --- a/src/frostfs_testlib/credentials/interfaces.py +++ b/src/frostfs_testlib/credentials/interfaces.py @@ -26,7 +26,7 @@ class S3CredentialsProvider(ABC): self.cluster = cluster @abstractmethod - def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials: + def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None, **kwargs) -> S3Credentials: raise NotImplementedError("Directly called abstract class?") @@ -35,7 +35,7 @@ class GrpcCredentialsProvider(ABC): self.cluster = cluster @abstractmethod - def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo: + def provide(self, user: User, cluster_node: ClusterNode, **kwargs) -> WalletInfo: raise NotImplementedError("Directly called abstract class?") diff --git a/src/frostfs_testlib/fixtures.py b/src/frostfs_testlib/fixtures.py new file mode 100644 index 00000000..7d767d23 --- /dev/null +++ b/src/frostfs_testlib/fixtures.py @@ -0,0 +1,52 @@ +import logging +import os +from datetime import datetime +from importlib.metadata import entry_points + +import pytest +import yaml + +from frostfs_testlib import reporter +from frostfs_testlib.hosting.hosting import Hosting +from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE +from frostfs_testlib.storage import get_service_registry + + +@pytest.fixture(scope="session", autouse=True) +def session_start_time(): + start_time = datetime.utcnow() + return start_time + + +@pytest.fixture(scope="session") +def configure_testlib(): + reporter.get_reporter().register_handler(reporter.AllureHandler()) + reporter.get_reporter().register_handler(reporter.StepsLogger()) + logging.getLogger("paramiko").setLevel(logging.INFO) + + # Register Services for cluster + registry = get_service_registry() + services = entry_points(group="frostfs.testlib.services") + for svc in services: + registry.register_service(svc.name, svc.load()) + + +@pytest.fixture(scope="session") +def temp_directory(configure_testlib): + with reporter.step("Prepare tmp directory"): + full_path = ASSETS_DIR + if not os.path.exists(full_path): + os.mkdir(full_path) + + return full_path + + +@pytest.fixture(scope="session") +def hosting(configure_testlib) -> Hosting: + with open(HOSTING_CONFIG_FILE, "r") as file: + hosting_config = 
yaml.full_load(file) + + hosting_instance = Hosting() + hosting_instance.configure(hosting_config) + + return hosting_instance diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py index 0443e284..fc7ba59a 100644 --- a/src/frostfs_testlib/healthcheck/basic_healthcheck.py +++ b/src/frostfs_testlib/healthcheck/basic_healthcheck.py @@ -47,6 +47,14 @@ class BasicHealthcheck(Healthcheck): self._perform(cluster_node, checks) + @wait_for_success(900, 30, title="Wait for tree healthcheck on {cluster_node}") + def tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: + checks = { + self._tree_healthcheck: {}, + } + + self._perform(cluster_node, checks) + @wait_for_success(120, 5, title="Wait for service healthcheck on {cluster_node}") def services_healthcheck(self, cluster_node: ClusterNode): svcs_to_check = cluster_node.services diff --git a/src/frostfs_testlib/healthcheck/interfaces.py b/src/frostfs_testlib/healthcheck/interfaces.py index c665b8a1..cf178520 100644 --- a/src/frostfs_testlib/healthcheck/interfaces.py +++ b/src/frostfs_testlib/healthcheck/interfaces.py @@ -19,3 +19,7 @@ class Healthcheck(ABC): @abstractmethod def services_healthcheck(self, cluster_node: ClusterNode): """Perform service status check on target cluster node""" + + @abstractmethod + def tree_healthcheck(self, cluster_node: ClusterNode): + """Perform tree healthcheck on target cluster node""" diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py new file mode 100644 index 00000000..d7e4cc84 --- /dev/null +++ b/src/frostfs_testlib/hooks.py @@ -0,0 +1,31 @@ +import pytest + + +@pytest.hookimpl(specname="pytest_collection_modifyitems") +def pytest_add_frostfs_marker(items: list[pytest.Item]): + # All tests which reside in frostfs nodeid are granted with frostfs marker, excluding + # nodeid = full path of the test + # 1. plugins + # 2. testlib itself + for item in items: + location = item.location[0] + if "frostfs" in location and "plugin" not in location and "testlib" not in location: + item.add_marker("frostfs") + + +# pytest hook. 
Do not rename +@pytest.hookimpl(trylast=True) +def pytest_collection_modifyitems(items: list[pytest.Item]): + # The order of running tests corresponded to the suites + items.sort(key=lambda item: item.location[0]) + + # Change order of tests based on @pytest.mark.order() marker + def order(item: pytest.Item) -> int: + order_marker = item.get_closest_marker("order") + if order_marker and (len(order_marker.args) != 1 or not isinstance(order_marker.args[0], int)): + raise RuntimeError("Incorrect usage of pytest.mark.order") + + order_value = order_marker.args[0] if order_marker else 0 + return order_value + + items.sort(key=lambda item: order(item)) diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py index f52f8b72..6cdee39a 100644 --- a/src/frostfs_testlib/hosting/config.py +++ b/src/frostfs_testlib/hosting/config.py @@ -60,6 +60,7 @@ class HostConfig: """ plugin_name: str + hostname: str healthcheck_plugin_name: str address: str s3_creds_plugin_name: str = field(default="authmate") diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 3c9883aa..d458b0a7 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -164,6 +164,9 @@ class DockerHost(Host): return volume_path + def send_signal_to_service(self, service_name: str, signal: str) -> None: + raise NotImplementedError("Not implemented for docker") + def delete_metabase(self, service_name: str) -> None: raise NotImplementedError("Not implemented for docker") @@ -185,6 +188,12 @@ class DockerHost(Host): def is_file_exist(self, file_path: str) -> None: raise NotImplementedError("Not implemented for docker") + def wipefs_storage_node_data(self, service_name: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def finish_wipefs(self, service_name: str) -> None: + raise NotImplementedError("Not implemented for docker") + def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: volume_path = self.get_data_directory(service_name) @@ -240,6 +249,8 @@ class DockerHost(Host): until: Optional[datetime] = None, unit: Optional[str] = None, exclude_filter: Optional[str] = None, + priority: Optional[str] = None, + word_count: bool = None, ) -> str: client = self._get_docker_client() filtered_logs = "" diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 13051e2c..a41161c5 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -29,6 +29,9 @@ class Host(ABC): self._service_config_by_name = {service_config.name: service_config for service_config in config.services} self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis} + def __repr__(self) -> str: + return self.config.address + @property def config(self) -> HostConfig: """Returns config of the host. @@ -117,6 +120,17 @@ class Host(ABC): service_name: Name of the service to stop. """ + @abstractmethod + def send_signal_to_service(self, service_name: str, signal: str) -> None: + """Send signal to service with specified name using kill - + + The service must be hosted on this host. + + Args: + service_name: Name of the service to stop. + signal: signal name. See kill -l to all names + """ + @abstractmethod def mask_service(self, service_name: str) -> None: """Prevent the service from start by any activity by masking it. 
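For illustration, the pytest_collection_modifyitems hook added in src/frostfs_testlib/hooks.py above sorts collected tests by suite path and then by an optional @pytest.mark.order() marker, with unmarked tests defaulting to order 0 and malformed markers raising RuntimeError. A minimal, hypothetical test module showing the resulting ordering (the test names are placeholders and the "order" marker is assumed to be registered in the consuming project's pytest configuration):

import pytest

@pytest.mark.order(2)
def test_runs_last():
    assert True

@pytest.mark.order(1)
def test_runs_second():
    assert True

def test_runs_first():
    # No marker -> order value 0, so this test sorts ahead of the marked ones.
    assert True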
@@ -178,6 +192,21 @@ class Host(ABC): cache_only: To delete cache only. """ + @abstractmethod + def wipefs_storage_node_data(self, service_name: str) -> None: + """Erases all data of the storage node with specified name. + + Args: + service_name: Name of storage node service. + """ + + def finish_wipefs(self, service_name: str) -> None: + """Erases all data of the storage node with specified name. + + Args: + service_name: Name of storage node service. + """ + @abstractmethod def delete_fstree(self, service_name: str) -> None: """ @@ -297,6 +326,8 @@ class Host(ABC): until: Optional[datetime] = None, unit: Optional[str] = None, exclude_filter: Optional[str] = None, + priority: Optional[str] = None, + word_count: bool = None, ) -> str: """Get logs from host filtered by regex. @@ -305,6 +336,9 @@ class Host(ABC): since: If set, limits the time from which logs should be collected. Must be in UTC. until: If set, limits the time until which logs should be collected. Must be in UTC. unit: required unit. + priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher. + For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0. + word_count: output type, expected values: lines, bytes, json Returns: Found entries as str if any found. diff --git a/src/frostfs_testlib/load/interfaces/scenario_runner.py b/src/frostfs_testlib/load/interfaces/scenario_runner.py index 45c13171..c0062a98 100644 --- a/src/frostfs_testlib/load/interfaces/scenario_runner.py +++ b/src/frostfs_testlib/load/interfaces/scenario_runner.py @@ -1,5 +1,6 @@ from abc import ABC, abstractmethod +from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.load.k6 import K6 from frostfs_testlib.load.load_config import LoadParams from frostfs_testlib.storage.cluster import ClusterNode @@ -48,3 +49,7 @@ class ScenarioRunner(ABC): @abstractmethod def get_results(self) -> dict: """Get results from K6 run""" + + @abstractmethod + def get_loaders(self) -> list[Loader]: + """Return loaders""" diff --git a/src/frostfs_testlib/load/interfaces/summarized.py b/src/frostfs_testlib/load/interfaces/summarized.py index 54947b46..4be33ef0 100644 --- a/src/frostfs_testlib/load/interfaces/summarized.py +++ b/src/frostfs_testlib/load/interfaces/summarized.py @@ -86,7 +86,7 @@ class SummarizedStats: target.latencies.by_node[node_key] = operation.latency target.throughput += operation.throughput target.errors.threshold = load_params.error_threshold - target.total_bytes = operation.total_bytes + target.total_bytes += operation.total_bytes if operation.failed_iterations: target.errors.by_node[node_key] = operation.failed_iterations diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 2a546c40..38302032 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -25,6 +25,16 @@ def convert_time_to_seconds(time: int | str | None) -> int: return seconds +def force_list(input: str | list[str]): + if input is None: + return None + + if isinstance(input, list): + return list(map(str.strip, input)) + + return [input.strip()] + + class LoadType(Enum): gRPC = "grpc" S3 = "s3" @@ -119,6 +129,8 @@ class NodesSelectionStrategy(Enum): ALL_EXCEPT_UNDER_TEST = "ALL_EXCEPT_UNDER_TEST" # Select ONE random node except under test (useful for failover). 
RANDOM_SINGLE_EXCEPT_UNDER_TEST = "RANDOM_SINGLE_EXCEPT_UNDER_TEST" + # Select node under test + NODE_UNDER_TEST = "NODE_UNDER_TEST" class EndpointSelectionStrategy(Enum): @@ -140,8 +152,29 @@ class K6ProcessAllocationStrategy(Enum): PER_ENDPOINT = "PER_ENDPOINT" +class MetaConfig: + def _get_field_formatter(self, field_name: str) -> Callable | None: + data_fields = fields(self) + formatters = [ + field.metadata["formatter"] + for field in data_fields + if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None + ] + if formatters: + return formatters[0] + + return None + + def __setattr__(self, field_name, value): + formatter = self._get_field_formatter(field_name) + if formatter: + value = formatter(value) + + super().__setattr__(field_name, value) + + @dataclass -class Preset: +class Preset(MetaConfig): # ------ COMMON ------ # Amount of objects which should be created objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None, False) @@ -149,20 +182,24 @@ class Preset: pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False) # Workers count for preset workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False) - # Acl for container/buckets + # TODO: Deprecated. Acl for container/buckets acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False) + # APE rule for containers instead of deprecated ACL + rule: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "rule", None, False, formatter=force_list) # ------ GRPC ------ # Amount of containers which should be created containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None, False) # Container placement policy for containers for gRPC - container_placement_policy: Optional[str] = metadata_field(grpc_preset_scenarios, "policy", None, False) + container_placement_policy: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "policy", None, False, formatter=force_list) + # Number of retries for creation of container + container_creation_retry: Optional[int] = metadata_field(grpc_preset_scenarios, "retry", None, False) # ------ S3 ------ # Amount of buckets which should be created buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None, False) # S3 region (AKA placement policy for S3 buckets) - s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None, False) + s3_location: Optional[list[str]] = metadata_field(s3_preset_scenarios, "location", None, False, formatter=force_list) # Delay between containers creation and object upload for preset object_upload_delay: Optional[int] = metadata_field(all_load_scenarios, "sleep", None, False) @@ -175,7 +212,7 @@ class Preset: @dataclass -class PrometheusParams: +class PrometheusParams(MetaConfig): # Prometheus server URL server_url: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False) # Prometheus trend stats @@ -185,7 +222,7 @@ class PrometheusParams: @dataclass -class LoadParams: +class LoadParams(MetaConfig): # ------- CONTROL PARAMS ------- # Load type can be gRPC, HTTP, S3. load_type: LoadType @@ -233,6 +270,8 @@ class LoadParams: ) # Percentage of filling of all data disks on all nodes fill_percent: Optional[float] = None + # if specified, max payload size in GB of the storage engine. If the storage engine is already full, no new objects will be saved. 
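For illustration, the force_list formatter defined above normalizes the Preset fields that now accept either a single string or a list (rule, container_placement_policy, s3_location); each resulting list element is later rendered as a separate --<preset_argument> '<value>' flag. Assuming the function behaves exactly as written in load_config.py, a quick sketch of its effect:

# Sketch of force_list behavior as defined in load_config.py above.
assert force_list(" REP 2 IN X ") == ["REP 2 IN X"]            # bare string -> one-item list
assert force_list(["REP 1 ", " REP 3"]) == ["REP 1", "REP 3"]  # list -> element-wise stripped copy
assert force_list(None) is None                                # None passes through unchanged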
+ max_total_size_gb: Optional[float] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "MAX_TOTAL_SIZE_GB") # if set, the payload is generated on the fly and is not read into memory fully. streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False) # Output format @@ -408,6 +447,11 @@ class LoadParams: # For preset calls, bool values are passed with just -- if the value is True return f"--{meta_field.metadata['preset_argument']}" if meta_field.value else "" + if isinstance(meta_field.value, list): + return ( + " ".join(f"--{meta_field.metadata['preset_argument']} '{value}'" for value in meta_field.value) if meta_field.value else "" + ) + return f"--{meta_field.metadata['preset_argument']} '{meta_field.value}'" @staticmethod @@ -427,25 +471,6 @@ class LoadParams: return fields_with_data or [] - def _get_field_formatter(self, field_name: str) -> Callable | None: - data_fields = fields(self) - formatters = [ - field.metadata["formatter"] - for field in data_fields - if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None - ] - if formatters: - return formatters[0] - - return None - - def __setattr__(self, field_name, value): - formatter = self._get_field_formatter(field_name) - if formatter: - value = formatter(value) - - super().__setattr__(field_name, value) - def __str__(self) -> str: load_type_str = self.scenario.value if self.scenario else self.load_type.value # TODO: migrate load_params defaults to testlib diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py index cbf6f64c..97b0ffaf 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -57,6 +57,8 @@ class LoadVerifier: invalid_objects = verify_metrics.read.failed_iterations total_left_objects = load_metrics.write.success_iterations - delete_success + if invalid_objects > 0: + issues.append(f"There were {invalid_objects} verification fails (hash mismatch).") # Due to interruptions we may see total verified objects to be less than written on writers count if abs(total_left_objects - verified_objects) > writers: issues.append( diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index a34786fa..1ceac091 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -30,6 +30,7 @@ from frostfs_testlib.utils.file_keeper import FileKeeper class RunnerBase(ScenarioRunner): k6_instances: list[K6] + loaders: list[Loader] @reporter.step("Run preset on loaders") def preset(self): @@ -49,9 +50,11 @@ class RunnerBase(ScenarioRunner): def get_k6_instances(self): return self.k6_instances + def get_loaders(self) -> list[Loader]: + return self.loaders + class DefaultRunner(RunnerBase): - loaders: list[Loader] user: User def __init__( @@ -228,7 +231,6 @@ class DefaultRunner(RunnerBase): class LocalRunner(RunnerBase): - loaders: list[Loader] cluster_state_controller: ClusterStateController file_keeper: FileKeeper user: User diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py index 56249401..071675a1 100644 --- a/src/frostfs_testlib/processes/remote_process.py +++ b/src/frostfs_testlib/processes/remote_process.py @@ -193,7 +193,7 @@ class RemoteProcess: ) if "No such file or directory" in terminal.stderr: return None - elif terminal.stderr or terminal.return_code != 0: + elif terminal.return_code != 0: raise AssertionError(f"cat process {file} 
was not successful: {terminal.stderr}") return terminal.stdout diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index 7f8d2c48..53bcfaa4 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -46,3 +46,11 @@ with open(DEFAULT_WALLET_CONFIG, "w") as file: MAX_REQUEST_ATTEMPTS = 5 RETRY_MODE = "standard" CREDENTIALS_CREATE_TIMEOUT = "1m" + + +HOSTING_CONFIG_FILE = os.getenv( + "HOSTING_CONFIG_FILE", os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..", ".devenv.hosting.yaml")) +) + +MORE_LOG = os.getenv("MORE_LOG", "1") +EXPIRATION_EPOCH_ATTRIBUTE = "__SYSTEM__EXPIRATION_EPOCH" diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index e2e4c48e..15e29771 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -1,5 +1,6 @@ # Regex patterns of status codes of Container service CONTAINER_NOT_FOUND = "code = 3072.*message = container not found" +SUBJECT_NOT_FOUND = "code = 1024.*message =.*chain/client.*subject not found.*" # Regex patterns of status codes of Object service MALFORMED_REQUEST = "code = 1024.*message = malformed request" @@ -9,6 +10,7 @@ OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed" SESSION_NOT_FOUND = "code = 4096.*message = session token not found" OUT_OF_RANGE = "code = 2053.*message = out of range" EXPIRED_SESSION_TOKEN = "code = 4097.*message = expired session token" +ADD_CHAIN_ERROR = "code = 5120 message = apemanager access denied" # TODO: Change to codes with message # OBJECT_IS_LOCKED = "code = 2050.*message = object is locked" # LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." will be available once 2092 is fixed @@ -23,6 +25,14 @@ INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow" INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier" INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier" -S3_MALFORMED_XML_REQUEST = ( - "The XML you provided was not well-formed or did not validate against our published schema." -) +S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs" +S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema." + +RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied" +# Errors from node missing reasons if request was forwarded. Commenting for now +# RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied" +RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request" +NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound" +# Errors from node missing reasons if request was forwarded. 
Commenting for now +# NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound" +NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request" diff --git a/src/frostfs_testlib/resources/load_params.py b/src/frostfs_testlib/resources/load_params.py index 97193ccf..ad3ed1c7 100644 --- a/src/frostfs_testlib/resources/load_params.py +++ b/src/frostfs_testlib/resources/load_params.py @@ -26,6 +26,7 @@ BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY = os.getenv( ) BACKGROUND_LOAD_S3_LOCATION = os.getenv("BACKGROUND_LOAD_S3_LOCATION", "node-off") PRESET_CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "40") +PRESET_CONTAINER_CREATION_RETRY_COUNT = os.getenv("CONTAINER_CREATION_RETRY_COUNT", "20") # TODO: At lease one object is required due to bug in xk6 (buckets with no objects produce millions exceptions in read) PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "1") K6_DIRECTORY = os.getenv("K6_DIRECTORY", "/etc/k6") diff --git a/src/frostfs_testlib/resources/optionals.py b/src/frostfs_testlib/resources/optionals.py index 2a7ff224..6caf158f 100644 --- a/src/frostfs_testlib/resources/optionals.py +++ b/src/frostfs_testlib/resources/optionals.py @@ -16,11 +16,10 @@ OPTIONAL_NODE_UNDER_LOAD = os.getenv("OPTIONAL_NODE_UNDER_LOAD") OPTIONAL_FAILOVER_ENABLED = str_to_bool(os.getenv("OPTIONAL_FAILOVER_ENABLED", "true")) # Set this to True to disable background load. I.E. node which supposed to be stopped will not be actually stopped. -OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool( - os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true") -) +OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true")) # Set this to False for disable autouse fixture like node healthcheck during developing time. 
-OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool( - os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true") -) +OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true")) + +# Use cache for fixtures with @cachec_fixture decorator +OPTIONAL_CACHE_FIXTURES = str_to_bool(os.getenv("OPTIONAL_CACHE_FIXTURES", "false")) diff --git a/src/frostfs_testlib/resources/s3_acl_grants.py b/src/frostfs_testlib/resources/s3_acl_grants.py new file mode 100644 index 00000000..a716bc51 --- /dev/null +++ b/src/frostfs_testlib/resources/s3_acl_grants.py @@ -0,0 +1,9 @@ +ALL_USERS_GROUP_URI = "http://acs.amazonaws.com/groups/global/AllUsers" +ALL_USERS_GROUP_WRITE_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "WRITE"} +ALL_USERS_GROUP_READ_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "READ"} +CANONICAL_USER_FULL_CONTROL_GRANT = {"Grantee": {"Type": "CanonicalUser"}, "Permission": "FULL_CONTROL"} + +# https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl +PRIVATE_GRANTS = [] +PUBLIC_READ_GRANTS = [ALL_USERS_GROUP_READ_GRANT] +PUBLIC_READ_WRITE_GRANTS = [ALL_USERS_GROUP_WRITE_GRANT, ALL_USERS_GROUP_READ_GRANT] diff --git a/src/frostfs_testlib/s3/__init__.py b/src/frostfs_testlib/s3/__init__.py deleted file mode 100644 index 32426c26..00000000 --- a/src/frostfs_testlib/s3/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from frostfs_testlib.s3.aws_cli_client import AwsCliClient -from frostfs_testlib.s3.boto3_client import Boto3ClientWrapper -from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py deleted file mode 100644 index e4f2bb2e..00000000 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ /dev/null @@ -1,752 +0,0 @@ -import json -import logging -import os -import uuid -from datetime import datetime -from time import sleep -from typing import Literal, Optional, Union - -from frostfs_testlib import reporter -from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME -from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict -from frostfs_testlib.shell import CommandOptions -from frostfs_testlib.shell.local_shell import LocalShell - -# TODO: Refactor this code to use shell instead of _cmd_run -from frostfs_testlib.utils.cli_utils import _configure_aws_cli - -logger = logging.getLogger("NeoLogger") -command_options = CommandOptions(timeout=480) - - -class AwsCliClient(S3ClientWrapper): - __repr_name__: str = "AWS CLI" - - # Flags that we use for all S3 commands: disable SSL verification (as we use self-signed - # certificate in devenv) and disable automatic pagination in CLI output - common_flags = "--no-verify-ssl --no-paginate" - s3gate_endpoint: str - - @reporter.step("Configure S3 client (aws cli)") - def __init__( - self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default" - ) -> None: - self.s3gate_endpoint = s3gate_endpoint - self.profile = profile - self.local_shell = LocalShell() - try: - _configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key) - self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}") - self.local_shell.exec( - f"aws configure set retry_mode {RETRY_MODE} --profile {profile}", - ) - except Exception as err: - raise RuntimeError("Error 
while configuring AwsCliClient") from err - - @reporter.step("Set endpoint S3 to {s3gate_endpoint}") - def set_endpoint(self, s3gate_endpoint: str): - self.s3gate_endpoint = s3gate_endpoint - - @reporter.step("Create bucket S3") - def create_bucket( - self, - bucket: Optional[str] = None, - object_lock_enabled_for_bucket: Optional[bool] = None, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - grant_full_control: Optional[str] = None, - location_constraint: Optional[str] = None, - ) -> str: - if bucket is None: - bucket = str(uuid.uuid4()) - - if object_lock_enabled_for_bucket is None: - object_lock = "" - elif object_lock_enabled_for_bucket: - object_lock = " --object-lock-enabled-for-bucket" - else: - object_lock = " --no-object-lock-enabled-for-bucket" - cmd = ( - f"aws {self.common_flags} s3api create-bucket --bucket {bucket} " - f"{object_lock} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if acl: - cmd += f" --acl {acl}" - if grant_full_control: - cmd += f" --grant-full-control {grant_full_control}" - if grant_write: - cmd += f" --grant-write {grant_write}" - if grant_read: - cmd += f" --grant-read {grant_read}" - if location_constraint: - cmd += f" --create-bucket-configuration LocationConstraint={location_constraint}" - self.local_shell.exec(cmd) - sleep(S3_SYNC_WAIT_TIME) - - return bucket - - @reporter.step("List buckets S3") - def list_buckets(self) -> list[str]: - cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint} --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - buckets_json = self._to_json(output) - return [bucket["Name"] for bucket in buckets_json["Buckets"]] - - @reporter.step("Delete bucket S3") - def delete_bucket(self, bucket: str) -> None: - cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - self.local_shell.exec(cmd, command_options) - sleep(S3_SYNC_WAIT_TIME) - - @reporter.step("Head bucket S3") - def head_bucket(self, bucket: str) -> None: - cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - self.local_shell.exec(cmd) - - @reporter.step("Put bucket versioning status") - def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: - cmd = ( - f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} " - f"--versioning-configuration Status={status.value} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Get bucket versioning status") - def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: - cmd = ( - f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Status") - - @reporter.step("Put bucket tagging") - def put_bucket_tagging(self, bucket: str, tags: list) -> None: - tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]} - cmd = ( - f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} " - f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Get bucket tagging") - def get_bucket_tagging(self, bucket: str) -> 
list: - cmd = ( - f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("TagSet") - - @reporter.step("Get bucket acl") - def get_bucket_acl(self, bucket: str) -> list: - cmd = ( - f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Grants") - - @reporter.step("Get bucket location") - def get_bucket_location(self, bucket: str) -> dict: - cmd = ( - f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("LocationConstraint") - - @reporter.step("List objects S3") - def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = ( - f"aws {self.common_flags} s3api list-objects --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - obj_list = [obj["Key"] for obj in response.get("Contents", [])] - logger.info(f"Found s3 objects: {obj_list}") - - return response if full_output else obj_list - - @reporter.step("List objects S3 v2") - def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = ( - f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - obj_list = [obj["Key"] for obj in response.get("Contents", [])] - logger.info(f"Found s3 objects: {obj_list}") - - return response if full_output else obj_list - - @reporter.step("List objects versions S3") - def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: - cmd = ( - f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response if full_output else response.get("Versions", []) - - @reporter.step("List objects delete markers S3") - def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: - cmd = ( - f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response if full_output else response.get("DeleteMarkers", []) - - @reporter.step("Copy object S3") - def copy_object( - self, - source_bucket: str, - source_key: str, - bucket: Optional[str] = None, - key: Optional[str] = None, - acl: Optional[str] = None, - metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None, - metadata: Optional[dict] = None, - tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None, - tagging: Optional[str] = None, - ) -> str: - if bucket is None: - bucket = source_bucket - if key is None: - key = os.path.join(os.getcwd(), str(uuid.uuid4())) - copy_source = f"{source_bucket}/{source_key}" - - cmd = ( - f"aws {self.common_flags} s3api copy-object --copy-source 
{copy_source} " - f"--bucket {bucket} --key {key} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if acl: - cmd += f" --acl {acl}" - if metadata_directive: - cmd += f" --metadata-directive {metadata_directive}" - if metadata: - cmd += " --metadata " - for meta_key, value in metadata.items(): - cmd += f" {meta_key}={value}" - if tagging_directive: - cmd += f" --tagging-directive {tagging_directive}" - if tagging: - cmd += f" --tagging {tagging}" - self.local_shell.exec(cmd, command_options) - return key - - @reporter.step("Put object S3") - def put_object( - self, - bucket: str, - filepath: str, - key: Optional[str] = None, - metadata: Optional[dict] = None, - tagging: Optional[str] = None, - acl: Optional[str] = None, - object_lock_mode: Optional[str] = None, - object_lock_retain_until_date: Optional[datetime] = None, - object_lock_legal_hold_status: Optional[str] = None, - grant_full_control: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> str: - if key is None: - key = os.path.basename(filepath) - - cmd = ( - f"aws {self.common_flags} s3api put-object --bucket {bucket} --key {key} " - f"--body {filepath} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if metadata: - cmd += " --metadata" - for key, value in metadata.items(): - cmd += f" {key}={value}" - if tagging: - cmd += f" --tagging '{tagging}'" - if acl: - cmd += f" --acl {acl}" - if object_lock_mode: - cmd += f" --object-lock-mode {object_lock_mode}" - if object_lock_retain_until_date: - cmd += f' --object-lock-retain-until-date "{object_lock_retain_until_date}"' - if object_lock_legal_hold_status: - cmd += f" --object-lock-legal-hold-status {object_lock_legal_hold_status}" - if grant_full_control: - cmd += f" --grant-full-control '{grant_full_control}'" - if grant_read: - cmd += f" --grant-read {grant_read}" - output = self.local_shell.exec(cmd, command_options).stdout - response = self._to_json(output) - return response.get("VersionId") - - @reporter.step("Head object S3") - def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} " - f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response - - @reporter.step("Get object S3") - def get_object( - self, - bucket: str, - key: str, - version_id: Optional[str] = None, - object_range: Optional[tuple[int, int]] = None, - full_output: bool = False, - ) -> Union[dict, str]: - file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} " - f"{version} {file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if object_range: - cmd += f" --range bytes={object_range[0]}-{object_range[1]}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response if full_output else file_path - - @reporter.step("Get object ACL") - def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} " - f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - 
output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Grants") - - @reporter.step("Put object ACL") - def put_object_acl( - self, - bucket: str, - key: str, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> list: - cmd = ( - f"aws {self.common_flags} s3api put-object-acl --bucket {bucket} --key {key} " - f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if acl: - cmd += f" --acl {acl}" - if grant_write: - cmd += f" --grant-write {grant_write}" - if grant_read: - cmd += f" --grant-read {grant_read}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Grants") - - @reporter.step("Put bucket ACL") - def put_bucket_acl( - self, - bucket: str, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> None: - cmd = ( - f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " - f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if acl: - cmd += f" --acl {acl}" - if grant_write: - cmd += f" --grant-write {grant_write}" - if grant_read: - cmd += f" --grant-read {grant_read}" - self.local_shell.exec(cmd) - - @reporter.step("Delete objects S3") - def delete_objects(self, bucket: str, keys: list[str]) -> dict: - file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") - delete_structure = json.dumps(_make_objs_dict(keys)) - with open(file_path, "w") as out_file: - out_file.write(delete_structure) - logger.info(f"Input file for delete-objects: {delete_structure}") - - cmd = ( - f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " - f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd, command_options).stdout - response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME) - return response - - @reporter.step("Delete object S3") - def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api delete-object --bucket {bucket} " - f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd, command_options).stdout - sleep(S3_SYNC_WAIT_TIME) - return self._to_json(output) - - @reporter.step("Delete object versions S3") - def delete_object_versions(self, bucket: str, object_versions: list) -> dict: - # Build deletion list in S3 format - delete_list = { - "Objects": [ - { - "Key": object_version["Key"], - "VersionId": object_version["VersionId"], - } - for object_version in object_versions - ] - } - - file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") - delete_structure = json.dumps(delete_list) - with open(file_path, "w") as out_file: - out_file.write(delete_structure) - logger.info(f"Input file for delete-objects: {delete_structure}") - - cmd = ( - f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " - f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd, command_options).stdout - sleep(S3_SYNC_WAIT_TIME) - return self._to_json(output) - - @reporter.step("Delete object versions S3 without delete markers") - def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: - # Delete objects without creating delete markers - for 
object_version in object_versions: - self.delete_object(bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"]) - - @reporter.step("Get object attributes") - def get_object_attributes( - self, - bucket: str, - key: str, - attributes: list[str], - version_id: str = "", - max_parts: int = 0, - part_number: int = 0, - full_output: bool = True, - ) -> dict: - - attrs = ",".join(attributes) - version = f" --version-id {version_id}" if version_id else "" - parts = f"--max-parts {max_parts}" if max_parts else "" - part_number_str = f"--part-number-marker {part_number}" if part_number else "" - cmd = ( - f"aws {self.common_flags} s3api get-object-attributes --bucket {bucket} " - f"--key {key} {version} {parts} {part_number_str} --object-attributes {attrs} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - for attr in attributes: - assert attr in response, f"Expected attribute {attr} in {response}" - - if full_output: - return response - else: - return response.get(attributes[0]) - - @reporter.step("Get bucket policy") - def get_bucket_policy(self, bucket: str) -> dict: - cmd = ( - f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Policy") - - @reporter.step("Put bucket policy") - def put_bucket_policy(self, bucket: str, policy: dict) -> None: - # Leaving it as is was in test repo. Double dumps to escape resulting string - # Example: - # policy = {"a": 1} - # json.dumps(policy) => {"a": 1} - # json.dumps(json.dumps(policy)) => "{\"a\": 1}" - # TODO: update this - dumped_policy = json.dumps(json.dumps(policy)) - cmd = ( - f"aws {self.common_flags} s3api put-bucket-policy --bucket {bucket} " - f"--policy {dumped_policy} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Get bucket cors") - def get_bucket_cors(self, bucket: str) -> dict: - cmd = ( - f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("CORSRules") - - @reporter.step("Put bucket cors") - def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: - cmd = ( - f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " - f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Delete bucket cors") - def delete_bucket_cors(self, bucket: str) -> None: - cmd = ( - f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Delete bucket tagging") - def delete_bucket_tagging(self, bucket: str) -> None: - cmd = ( - f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Put object retention") - def put_object_retention( - self, - bucket: str, - key: str, - retention: dict, - version_id: Optional[str] = None, - bypass_governance_retention: Optional[bool] = None, - ) -> None: - version = f" 
--version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api put-object-retention --bucket {bucket} --key {key} " - f"{version} --retention '{json.dumps(retention, indent=4, sort_keys=True, default=str)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if bypass_governance_retention is not None: - cmd += " --bypass-governance-retention" - self.local_shell.exec(cmd) - - @reporter.step("Put object legal hold") - def put_object_legal_hold( - self, - bucket: str, - key: str, - legal_hold_status: Literal["ON", "OFF"], - version_id: Optional[str] = None, - ) -> None: - version = f" --version-id {version_id}" if version_id else "" - legal_hold = json.dumps({"Status": legal_hold_status}) - cmd = ( - f"aws {self.common_flags} s3api put-object-legal-hold --bucket {bucket} --key {key} " - f"{version} --legal-hold '{legal_hold}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Put object tagging") - def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: - tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - tagging = {"TagSet": tags} - cmd = ( - f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} " - f"--tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Get object tagging") - def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key {key} " - f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("TagSet") - - @reporter.step("Delete object tagging") - def delete_object_tagging(self, bucket: str, key: str) -> None: - cmd = ( - f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " - f"--key {key} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Sync directory S3") - def sync( - self, - bucket: str, - dir_path: str, - acl: Optional[str] = None, - metadata: Optional[dict] = None, - ) -> dict: - cmd = ( - f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - if metadata: - cmd += " --metadata" - for key, value in metadata.items(): - cmd += f" {key}={value}" - if acl: - cmd += f" --acl {acl}" - output = self.local_shell.exec(cmd, command_options).stdout - return self._to_json(output) - - @reporter.step("CP directory S3") - def cp( - self, - bucket: str, - dir_path: str, - acl: Optional[str] = None, - metadata: Optional[dict] = None, - ) -> dict: - cmd = ( - f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket} " - f"--endpoint-url {self.s3gate_endpoint} --recursive --profile {self.profile}" - ) - if metadata: - cmd += " --metadata" - for key, value in metadata.items(): - cmd += f" {key}={value}" - if acl: - cmd += f" --acl {acl}" - output = self.local_shell.exec(cmd, command_options).stdout - return self._to_json(output) - - @reporter.step("Create multipart upload S3") - def create_multipart_upload(self, bucket: str, key: str) -> str: - cmd = ( - f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " - f"--key {key} --endpoint-url 
{self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" - - return response["UploadId"] - - @reporter.step("List multipart uploads S3") - def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: - cmd = ( - f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Uploads") - - @reporter.step("Abort multipart upload S3") - def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: - cmd = ( - f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} " - f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Upload part S3") - def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: - cmd = ( - f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} " - f"--upload-id {upload_id} --part-number {part_num} --body {filepath} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd, command_options).stdout - response = self._to_json(output) - assert response.get("ETag"), f"Expected ETag in response:\n{response}" - return response["ETag"] - - @reporter.step("Upload copy part S3") - def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: - cmd = ( - f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} " - f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd, command_options).stdout - response = self._to_json(output) - assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" - - return response["CopyPartResult"]["ETag"] - - @reporter.step("List parts S3") - def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: - cmd = ( - f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " - f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("Parts"), f"Expected Parts in response:\n{response}" - - return response["Parts"] - - @reporter.step("Complete multipart upload S3") - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: - file_path = os.path.join(os.getcwd(), ASSETS_DIR, "parts.json") - parts_dict = {"Parts": [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]} - - with open(file_path, "w") as out_file: - out_file.write(json.dumps(parts_dict)) - - logger.info(f"Input file for complete-multipart-upload: {json.dumps(parts_dict)}") - - cmd = ( - f"aws {self.common_flags} s3api complete-multipart-upload --bucket {bucket} " - f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Put object lock configuration") - def put_object_lock_configuration(self, bucket: str, 
configuration: dict) -> dict: - cmd = ( - f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " - f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - return self._to_json(output) - - @reporter.step("Get object lock configuration") - def get_object_lock_configuration(self, bucket: str): - cmd = ( - f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("ObjectLockConfiguration") - - @staticmethod - def _to_json(output: str) -> dict: - json_output = {} - if "{" not in output and "}" not in output: - logger.warning(f"Could not parse json from output {output}") - return json_output - - json_output = json.loads(output[output.index("{") :]) - - return json_output diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py deleted file mode 100644 index cb1ec286..00000000 --- a/src/frostfs_testlib/s3/boto3_client.py +++ /dev/null @@ -1,656 +0,0 @@ -import json -import logging -import os -import uuid -from datetime import datetime -from functools import wraps -from time import sleep -from typing import Literal, Optional, Union - -import boto3 -import urllib3 -from botocore.config import Config -from botocore.exceptions import ClientError -from mypy_boto3_s3 import S3Client - -from frostfs_testlib import reporter -from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME -from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict -from frostfs_testlib.utils.cli_utils import log_command_execution - -logger = logging.getLogger("NeoLogger") - -# Disable warnings on self-signed certificate which the -# boto library produces on requests to S3-gate in dev-env -urllib3.disable_warnings() - - -def report_error(func): - @wraps(func) - def deco(*a, **kw): - try: - return func(*a, **kw) - except ClientError as err: - log_command_execution("Result", str(err)) - raise - - return deco - - -class Boto3ClientWrapper(S3ClientWrapper): - __repr_name__: str = "Boto3 client" - - @reporter.step("Configure S3 client (boto3)") - @report_error - def __init__( - self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default" - ) -> None: - self.boto3_client: S3Client = None - self.session = boto3.Session() - self.config = Config( - retries={ - "max_attempts": MAX_REQUEST_ATTEMPTS, - "mode": RETRY_MODE, - } - ) - self.access_key_id: str = access_key_id - self.secret_access_key: str = secret_access_key - self.s3gate_endpoint: str = "" - self.set_endpoint(s3gate_endpoint) - - @reporter.step("Set endpoint S3 to {s3gate_endpoint}") - def set_endpoint(self, s3gate_endpoint: str): - if self.s3gate_endpoint == s3gate_endpoint: - return - - self.s3gate_endpoint = s3gate_endpoint - - self.boto3_client: S3Client = self.session.client( - service_name="s3", - aws_access_key_id=self.access_key_id, - aws_secret_access_key=self.secret_access_key, - config=self.config, - endpoint_url=s3gate_endpoint, - verify=False, - ) - - def _to_s3_param(self, param: str): - replacement_map = { - "Acl": "ACL", - "Cors": "CORS", - "_": "", - } - result = param.title() - for find, replace in replacement_map.items(): - result = result.replace(find, replace) - 
return result - - # BUCKET METHODS # - @reporter.step("Create bucket S3") - @report_error - def create_bucket( - self, - bucket: Optional[str] = None, - object_lock_enabled_for_bucket: Optional[bool] = None, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - grant_full_control: Optional[str] = None, - location_constraint: Optional[str] = None, - ) -> str: - if bucket is None: - bucket = str(uuid.uuid4()) - - params = {"Bucket": bucket} - if object_lock_enabled_for_bucket is not None: - params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket}) - if acl is not None: - params.update({"ACL": acl}) - elif grant_write or grant_read or grant_full_control: - if grant_write: - params.update({"GrantWrite": grant_write}) - elif grant_read: - params.update({"GrantRead": grant_read}) - elif grant_full_control: - params.update({"GrantFullControl": grant_full_control}) - if location_constraint: - params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) - - s3_bucket = self.boto3_client.create_bucket(**params) - log_command_execution(f"Created S3 bucket {bucket}", s3_bucket) - sleep(S3_SYNC_WAIT_TIME) - return bucket - - @reporter.step("List buckets S3") - @report_error - def list_buckets(self) -> list[str]: - found_buckets = [] - - response = self.boto3_client.list_buckets() - log_command_execution("S3 List buckets result", response) - - for bucket in response["Buckets"]: - found_buckets.append(bucket["Name"]) - - return found_buckets - - @reporter.step("Delete bucket S3") - @report_error - def delete_bucket(self, bucket: str) -> None: - response = self.boto3_client.delete_bucket(Bucket=bucket) - log_command_execution("S3 Delete bucket result", response) - sleep(S3_SYNC_WAIT_TIME) - - @reporter.step("Head bucket S3") - @report_error - def head_bucket(self, bucket: str) -> None: - response = self.boto3_client.head_bucket(Bucket=bucket) - log_command_execution("S3 Head bucket result", response) - - @reporter.step("Put bucket versioning status") - @report_error - def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: - response = self.boto3_client.put_bucket_versioning( - Bucket=bucket, VersioningConfiguration={"Status": status.value} - ) - log_command_execution("S3 Set bucket versioning to", response) - - @reporter.step("Get bucket versioning status") - @report_error - def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: - response = self.boto3_client.get_bucket_versioning(Bucket=bucket) - status = response.get("Status") - log_command_execution("S3 Got bucket versioning status", response) - return status - - @reporter.step("Put bucket tagging") - @report_error - def put_bucket_tagging(self, bucket: str, tags: list) -> None: - tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - tagging = {"TagSet": tags} - response = self.boto3_client.put_bucket_tagging(Bucket=bucket, Tagging=tagging) - log_command_execution("S3 Put bucket tagging", response) - - @reporter.step("Get bucket tagging") - @report_error - def get_bucket_tagging(self, bucket: str) -> list: - response = self.boto3_client.get_bucket_tagging(Bucket=bucket) - log_command_execution("S3 Get bucket tagging", response) - return response.get("TagSet") - - @reporter.step("Get bucket acl") - @report_error - def get_bucket_acl(self, bucket: str) -> list: - response = self.boto3_client.get_bucket_acl(Bucket=bucket) - log_command_execution("S3 Get bucket acl", response) 
- return response.get("Grants") - - @reporter.step("Delete bucket tagging") - @report_error - def delete_bucket_tagging(self, bucket: str) -> None: - response = self.boto3_client.delete_bucket_tagging(Bucket=bucket) - log_command_execution("S3 Delete bucket tagging", response) - - @reporter.step("Put bucket ACL") - @report_error - def put_bucket_acl( - self, - bucket: str, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> None: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } - response = self.boto3_client.put_bucket_acl(**params) - log_command_execution("S3 ACL bucket result", response) - - @reporter.step("Put object lock configuration") - @report_error - def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: - response = self.boto3_client.put_object_lock_configuration(Bucket=bucket, ObjectLockConfiguration=configuration) - log_command_execution("S3 put_object_lock_configuration result", response) - return response - - @reporter.step("Get object lock configuration") - @report_error - def get_object_lock_configuration(self, bucket: str) -> dict: - response = self.boto3_client.get_object_lock_configuration(Bucket=bucket) - log_command_execution("S3 get_object_lock_configuration result", response) - return response.get("ObjectLockConfiguration") - - @reporter.step("Get bucket policy") - @report_error - def get_bucket_policy(self, bucket: str) -> str: - response = self.boto3_client.get_bucket_policy(Bucket=bucket) - log_command_execution("S3 get_bucket_policy result", response) - return response.get("Policy") - - @reporter.step("Put bucket policy") - @report_error - def put_bucket_policy(self, bucket: str, policy: dict) -> None: - response = self.boto3_client.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy)) - log_command_execution("S3 put_bucket_policy result", response) - return response - - @reporter.step("Get bucket cors") - @report_error - def get_bucket_cors(self, bucket: str) -> dict: - response = self.boto3_client.get_bucket_cors(Bucket=bucket) - log_command_execution("S3 get_bucket_cors result", response) - return response.get("CORSRules") - - @reporter.step("Get bucket location") - @report_error - def get_bucket_location(self, bucket: str) -> str: - response = self.boto3_client.get_bucket_location(Bucket=bucket) - log_command_execution("S3 get_bucket_location result", response) - return response.get("LocationConstraint") - - @reporter.step("Put bucket cors") - @report_error - def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: - response = self.boto3_client.put_bucket_cors(Bucket=bucket, CORSConfiguration=cors_configuration) - log_command_execution("S3 put_bucket_cors result", response) - return response - - @reporter.step("Delete bucket cors") - @report_error - def delete_bucket_cors(self, bucket: str) -> None: - response = self.boto3_client.delete_bucket_cors(Bucket=bucket) - log_command_execution("S3 delete_bucket_cors result", response) - - # END OF BUCKET METHODS # - # OBJECT METHODS # - - @reporter.step("List objects S3 v2") - @report_error - def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - response = self.boto3_client.list_objects_v2(Bucket=bucket) - log_command_execution("S3 v2 List objects result", response) - - obj_list = [obj["Key"] for obj in response.get("Contents", [])] - logger.info(f"Found s3 objects: {obj_list}") - 
- return response if full_output else obj_list - - @reporter.step("List objects S3") - @report_error - def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - response = self.boto3_client.list_objects(Bucket=bucket) - log_command_execution("S3 List objects result", response) - - obj_list = [obj["Key"] for obj in response.get("Contents", [])] - logger.info(f"Found s3 objects: {obj_list}") - - return response if full_output else obj_list - - @reporter.step("List objects versions S3") - @report_error - def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: - response = self.boto3_client.list_object_versions(Bucket=bucket) - log_command_execution("S3 List objects versions result", response) - return response if full_output else response.get("Versions", []) - - @reporter.step("List objects delete markers S3") - @report_error - def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: - response = self.boto3_client.list_object_versions(Bucket=bucket) - log_command_execution("S3 List objects delete markers result", response) - return response if full_output else response.get("DeleteMarkers", []) - - @reporter.step("Put object S3") - @report_error - def put_object( - self, - bucket: str, - filepath: str, - key: Optional[str] = None, - metadata: Optional[dict] = None, - tagging: Optional[str] = None, - acl: Optional[str] = None, - object_lock_mode: Optional[str] = None, - object_lock_retain_until_date: Optional[datetime] = None, - object_lock_legal_hold_status: Optional[str] = None, - grant_full_control: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> str: - if key is None: - key = os.path.basename(filepath) - - with open(filepath, "rb") as put_file: - body = put_file.read() - - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self", "filepath", "put_file"] and value is not None - } - response = self.boto3_client.put_object(**params) - log_command_execution("S3 Put object result", response) - return response.get("VersionId") - - @reporter.step("Head object S3") - @report_error - def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } - response = self.boto3_client.head_object(**params) - log_command_execution("S3 Head object result", response) - return response - - @reporter.step("Delete object S3") - @report_error - def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } - response = self.boto3_client.delete_object(**params) - log_command_execution("S3 Delete object result", response) - sleep(S3_SYNC_WAIT_TIME) - return response - - @reporter.step("Delete objects S3") - @report_error - def delete_objects(self, bucket: str, keys: list[str]) -> dict: - response = self.boto3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(keys)) - log_command_execution("S3 Delete objects result", response) - assert ( - "Errors" not in response - ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' - sleep(S3_SYNC_WAIT_TIME) - return response - - @reporter.step("Delete object versions S3") - @report_error - def 
delete_object_versions(self, bucket: str, object_versions: list) -> dict: - # Build deletion list in S3 format - delete_list = { - "Objects": [ - { - "Key": object_version["Key"], - "VersionId": object_version["VersionId"], - } - for object_version in object_versions - ] - } - response = self.boto3_client.delete_objects(Bucket=bucket, Delete=delete_list) - log_command_execution("S3 Delete objects result", response) - return response - - @reporter.step("Delete object versions S3 without delete markers") - @report_error - def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: - # Delete objects without creating delete markers - for object_version in object_versions: - response = self.boto3_client.delete_object( - Bucket=bucket, Key=object_version["Key"], VersionId=object_version["VersionId"] - ) - log_command_execution("S3 Delete object result", response) - - @reporter.step("Put object ACL") - @report_error - def put_object_acl( - self, - bucket: str, - key: str, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> list: - # pytest.skip("Method put_object_acl is not supported by boto3 client") - raise NotImplementedError("Unsupported for boto3 client") - - @reporter.step("Get object ACL") - @report_error - def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } - response = self.boto3_client.get_object_acl(**params) - log_command_execution("S3 ACL objects result", response) - return response.get("Grants") - - @reporter.step("Copy object S3") - @report_error - def copy_object( - self, - source_bucket: str, - source_key: str, - bucket: Optional[str] = None, - key: Optional[str] = None, - acl: Optional[str] = None, - metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None, - metadata: Optional[dict] = None, - tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None, - tagging: Optional[str] = None, - ) -> str: - if bucket is None: - bucket = source_bucket - if key is None: - key = os.path.join(os.getcwd(), str(uuid.uuid4())) - copy_source = f"{source_bucket}/{source_key}" - - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self", "source_bucket", "source_key"] and value is not None - } - response = self.boto3_client.copy_object(**params) - log_command_execution("S3 Copy objects result", response) - return key - - @reporter.step("Get object S3") - @report_error - def get_object( - self, - bucket: str, - key: str, - version_id: Optional[str] = None, - object_range: Optional[tuple[int, int]] = None, - full_output: bool = False, - ) -> Union[dict, str]: - filename = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - range_str = None - if object_range: - range_str = f"bytes={object_range[0]}-{object_range[1]}" - - params = { - self._to_s3_param(param): value - for param, value in {**locals(), **{"Range": range_str}}.items() - if param not in ["self", "object_range", "full_output", "range_str", "filename"] and value is not None - } - response = self.boto3_client.get_object(**params) - log_command_execution("S3 Get objects result", response) - - with open(f"{filename}", "wb") as get_file: - chunk = response["Body"].read(1024) - while chunk: - get_file.write(chunk) - chunk = response["Body"].read(1024) - return response if full_output else filename - - 
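# --- Illustrative sketch, not part of this diff ---
# The removed Boto3ClientWrapper methods above build their boto3 keyword arguments
# by filtering locals() through _to_s3_param(), which title-cases snake_case names
# and patches boto3-specific spellings (ACL, CORS) while dropping None values.
# A minimal standalone version of that mapping (build_s3_params is a hypothetical helper):
def _to_s3_param(param: str) -> str:
    replacement_map = {"Acl": "ACL", "Cors": "CORS", "_": ""}
    result = param.title()
    for find, replace in replacement_map.items():
        result = result.replace(find, replace)
    return result


def build_s3_params(**kwargs) -> dict:
    # e.g. version_id -> VersionId, acl -> ACL; None values are omitted entirely
    return {_to_s3_param(name): value for name, value in kwargs.items() if value is not None}


# build_s3_params(bucket="b", key="k", version_id=None, acl="private")
# -> {"Bucket": "b", "Key": "k", "ACL": "private"}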
@reporter.step("Create multipart upload S3") - @report_error - def create_multipart_upload(self, bucket: str, key: str) -> str: - response = self.boto3_client.create_multipart_upload(Bucket=bucket, Key=key) - log_command_execution("S3 Created multipart upload", response) - assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" - - return response["UploadId"] - - @reporter.step("List multipart uploads S3") - @report_error - def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: - response = self.boto3_client.list_multipart_uploads(Bucket=bucket) - log_command_execution("S3 List multipart upload", response) - - return response.get("Uploads") - - @reporter.step("Abort multipart upload S3") - @report_error - def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: - response = self.boto3_client.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id) - log_command_execution("S3 Abort multipart upload", response) - - @reporter.step("Upload part S3") - @report_error - def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: - with open(filepath, "rb") as put_file: - body = put_file.read() - - response = self.boto3_client.upload_part( - UploadId=upload_id, - Bucket=bucket, - Key=key, - PartNumber=part_num, - Body=body, - ) - log_command_execution("S3 Upload part", response) - assert response.get("ETag"), f"Expected ETag in response:\n{response}" - - return response["ETag"] - - @reporter.step("Upload copy part S3") - @report_error - def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: - response = self.boto3_client.upload_part_copy( - UploadId=upload_id, - Bucket=bucket, - Key=key, - PartNumber=part_num, - CopySource=copy_source, - ) - log_command_execution("S3 Upload copy part", response) - assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" - - return response["CopyPartResult"]["ETag"] - - @reporter.step("List parts S3") - @report_error - def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: - response = self.boto3_client.list_parts(UploadId=upload_id, Bucket=bucket, Key=key) - log_command_execution("S3 List part", response) - assert response.get("Parts"), f"Expected Parts in response:\n{response}" - - return response["Parts"] - - @reporter.step("Complete multipart upload S3") - @report_error - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: - parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] - response = self.boto3_client.complete_multipart_upload( - Bucket=bucket, Key=key, UploadId=upload_id, MultipartUpload={"Parts": parts} - ) - log_command_execution("S3 Complete multipart upload", response) - - @reporter.step("Put object retention") - @report_error - def put_object_retention( - self, - bucket: str, - key: str, - retention: dict, - version_id: Optional[str] = None, - bypass_governance_retention: Optional[bool] = None, - ) -> None: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } - response = self.boto3_client.put_object_retention(**params) - log_command_execution("S3 Put object retention ", response) - - @reporter.step("Put object legal hold") - @report_error - def put_object_legal_hold( - self, - bucket: str, - key: str, - legal_hold_status: Literal["ON", "OFF"], - version_id: Optional[str] = 
None, - ) -> None: - legal_hold = {"Status": legal_hold_status} - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self", "legal_hold_status"] and value is not None - } - response = self.boto3_client.put_object_legal_hold(**params) - log_command_execution("S3 Put object legal hold ", response) - - @reporter.step("Put object tagging") - @report_error - def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: - tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - tagging = {"TagSet": tags} - response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging) - log_command_execution("S3 Put object tagging", response) - - @reporter.step("Get object tagging") - @report_error - def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } - response = self.boto3_client.get_object_tagging(**params) - log_command_execution("S3 Get object tagging", response) - return response.get("TagSet") - - @reporter.step("Delete object tagging") - @report_error - def delete_object_tagging(self, bucket: str, key: str) -> None: - response = self.boto3_client.delete_object_tagging(Bucket=bucket, Key=key) - log_command_execution("S3 Delete object tagging", response) - - @reporter.step("Get object attributes") - @report_error - def get_object_attributes( - self, - bucket: str, - key: str, - attributes: list[str], - version_id: Optional[str] = None, - max_parts: Optional[int] = None, - part_number: Optional[int] = None, - full_output: bool = True, - ) -> dict: - logger.warning("Method get_object_attributes is not supported by boto3 client") - return {} - - @reporter.step("Sync directory S3") - @report_error - def sync( - self, - bucket: str, - dir_path: str, - acl: Optional[str] = None, - metadata: Optional[dict] = None, - ) -> dict: - raise NotImplementedError("Sync is not supported for boto3 client") - - @reporter.step("CP directory S3") - @report_error - def cp( - self, - bucket: str, - dir_path: str, - acl: Optional[str] = None, - metadata: Optional[dict] = None, - ) -> dict: - raise NotImplementedError("Cp is not supported for boto3 client") - - # END OBJECT METHODS # diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index acf01ffd..c0f3b066 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -1,15 +1,18 @@ import logging import subprocess import tempfile +from contextlib import nullcontext from datetime import datetime from typing import IO, Optional import pexpect from frostfs_testlib import reporter +from frostfs_testlib.resources.common import MORE_LOG from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell logger = logging.getLogger("frostfs.testlib.shell") +step_context = reporter.step if MORE_LOG == "1" else nullcontext class LocalShell(Shell): @@ -28,10 +31,10 @@ class LocalShell(Shell): for inspector in [*self.command_inspectors, *extra_inspectors]: command = inspector.inspect(original_command, command) - logger.info(f"Executing command: {command}") - if options.interactive_inputs: - return self._exec_interactive(command, options) - return self._exec_non_interactive(command, options) + with step_context(f"Executing command: {command}"): + if options.interactive_inputs: + return 
self._exec_interactive(command, options) + return self._exec_non_interactive(command, options) def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: start_time = datetime.utcnow() @@ -60,9 +63,7 @@ class LocalShell(Shell): if options.check and result.return_code != 0: raise RuntimeError( - f"Command: {command}\nreturn code: {result.return_code}\n" - f"Output: {result.stdout}\n" - f"Stderr: {result.stderr}\n" + f"Command: {command}\nreturn code: {result.return_code}\n" f"Output: {result.stdout}\n" f"Stderr: {result.stderr}\n" ) return result @@ -93,9 +94,7 @@ class LocalShell(Shell): stderr="", return_code=exc.returncode, ) - raise RuntimeError( - f"Command: {command}\nError:\n" f"return code: {exc.returncode}\n" f"output: {exc.output}" - ) from exc + raise RuntimeError(f"Command: {command}\nError with retcode: {exc.returncode}\n Output: {exc.output}") from exc except OSError as exc: raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc finally: @@ -129,22 +128,19 @@ class LocalShell(Shell): end_time: datetime, result: Optional[CommandResult], ) -> None: - # TODO: increase logging level if return code is non 0, should be warning at least - logger.info( - f"Command: {command}\n" - f"{'Success:' if result and result.return_code == 0 else 'Error:'}\n" - f"return code: {result.return_code if result else ''} " - f"\nOutput: {result.stdout if result else ''}" - ) + if not result: + logger.warning(f"Command: {command}\n" f"Error: result is None") + return - if result: - elapsed_time = end_time - start_time - command_attachment = ( - f"COMMAND: {command}\n" - f"RETCODE: {result.return_code}\n\n" - f"STDOUT:\n{result.stdout}\n" - f"STDERR:\n{result.stderr}\n" - f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" - ) - with reporter.step(f"COMMAND: {command}"): - reporter.attach(command_attachment, "Command execution.txt") + status, log_method = ("Success", logger.info) if result.return_code == 0 else ("Error", logger.warning) + log_method(f"Command: {command}\n" f"{status} with retcode {result.return_code}\n" f"Output: \n{result.stdout}") + + elapsed_time = end_time - start_time + command_attachment = ( + f"COMMAND: {command}\n" + f"RETCODE: {result.return_code}\n\n" + f"STDOUT:\n{result.stdout}\n" + f"STDERR:\n{result.stderr}\n" + f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}" + ) + reporter.attach(command_attachment, "Command execution.txt") diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index e718b4dc..3f13dcaf 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -68,8 +68,7 @@ class SshConnectionProvider: try: if creds.ssh_key_path: logger.info( - f"Trying to connect to host {host} as {creds.ssh_login} using SSH key " - f"{creds.ssh_key_path} (attempt {attempt})" + f"Trying to connect to host {host} as {creds.ssh_login} using SSH key " f"{creds.ssh_key_path} (attempt {attempt})" ) connection.connect( hostname=host, @@ -79,9 +78,7 @@ class SshConnectionProvider: timeout=self.CONNECTION_TIMEOUT, ) else: - logger.info( - f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})" - ) + logger.info(f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})") connection.connect( hostname=host, port=port, @@ -104,9 +101,7 @@ class SshConnectionProvider: connection.close() can_retry = attempt + 1 < self.SSH_CONNECTION_ATTEMPTS if 
can_retry: - logger.warn( - f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}" - ) + logger.warn(f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}") sleep(self.SSH_ATTEMPTS_INTERVAL) continue logger.exception(f"Can't connect to host {host}") @@ -139,7 +134,7 @@ def log_command(func): f"RC:\n {result.return_code}\n" f"STDOUT:\n{textwrap.indent(result.stdout, ' ')}\n" f"STDERR:\n{textwrap.indent(result.stderr, ' ')}\n" - f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" + f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}" ) if not options.no_log: @@ -185,13 +180,11 @@ class SSHShell(Shell): private_key_passphrase: Optional[str] = None, port: str = "22", command_inspectors: Optional[list[CommandInspector]] = None, - custom_environment: Optional[dict] = None + custom_environment: Optional[dict] = None, ) -> None: super().__init__() self.connection_provider = SshConnectionProvider() - self.connection_provider.store_creds( - host, SshCredentials(login, password, private_key_path, private_key_passphrase) - ) + self.connection_provider.store_creds(host, SshCredentials(login, password, private_key_path, private_key_passphrase)) self.host = host self.port = port @@ -220,9 +213,7 @@ class SSHShell(Shell): result = self._exec_non_interactive(command, options) if options.check and result.return_code != 0: - raise RuntimeError( - f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n" - ) + raise RuntimeError(f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n") return result @log_command diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index fc643e20..092b1a3a 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -7,9 +7,7 @@ from typing import Optional, Union from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.plugins import load_plugin from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC -from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node from frostfs_testlib.storage.cluster import Cluster, ClusterNode @@ -95,6 +93,7 @@ class StorageContainer: DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" +DEFAULT_EC_PLACEMENT_RULE = "EC 3.1" @reporter.step("Create Container") @@ -110,6 +109,8 @@ def create_container( options: Optional[dict] = None, await_mode: bool = True, wait_for_creation: bool = True, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> str: """ @@ -142,6 +143,8 @@ def create_container( result = cli.container.create( rpc_endpoint=endpoint, policy=rule, + nns_name=nns_name, + nns_zone=nns_zone, basic_acl=basic_acl, attributes=attributes, name=name, @@ -199,7 +202,6 @@ def list_containers(wallet: WalletInfo, shell: Shell, endpoint: str, timeout: Op """ cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) result = cli.container.list(rpc_endpoint=endpoint, timeout=timeout) - logger.info(f"Containers: \n{result}") 
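# --- Illustrative sketch, not part of this diff ---
# The local_shell.py hunk above introduces
# `step_context = reporter.step if MORE_LOG == "1" else nullcontext`, so a report
# step is only emitted when verbose logging is enabled. The trick works because
# nullcontext() accepts (and ignores) a positional argument, so both callables can
# be invoked with the step title. A self-contained sketch, with fake_step as a
# hypothetical stand-in for reporter.step:
import os
from contextlib import contextmanager, nullcontext


@contextmanager
def fake_step(title: str):  # hypothetical stand-in for reporter.step
    print(f"STEP START: {title}")
    yield
    print(f"STEP END: {title}")


MORE_LOG = os.getenv("MORE_LOG", "1")
step_context = fake_step if MORE_LOG == "1" else nullcontext

with step_context("Executing command: ls -la"):
    pass  # the actual command would run here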
return result.stdout.split() @@ -327,13 +329,6 @@ def _parse_cid(output: str) -> str: return splitted[1] -@reporter.step("Search container by name") -def search_container_by_name(name: str, node: ClusterNode): - resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", node.host.config.product) - resolver: BucketContainerResolver = resolver_cls() - return resolver.resolve(node, name) - - @reporter.step("Search for nodes with a container") def search_nodes_with_container( wallet: WalletInfo, diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 5fe60545..7f8391d7 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -12,9 +12,12 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell import Shell from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.testing import wait_for_success from frostfs_testlib.utils import json_utils -from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output +from frostfs_testlib.utils.cli_utils import parse_netmap_output +from frostfs_testlib.utils.file_utils import TestFile logger = logging.getLogger("NeoLogger") @@ -80,7 +83,7 @@ def get_object( no_progress: bool = True, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> str: +) -> TestFile: """ GET from FrostFS. @@ -102,14 +105,14 @@ def get_object( if not write_object: write_object = str(uuid.uuid4()) - file_path = os.path.join(ASSETS_DIR, write_object) + test_file = TestFile(os.path.join(ASSETS_DIR, write_object)) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli.object.get( rpc_endpoint=endpoint, cid=cid, oid=oid, - file=file_path, + file=test_file, bearer=bearer, no_progress=no_progress, xhdr=xhdr, @@ -117,7 +120,7 @@ def get_object( timeout=timeout, ) - return file_path + return test_file @reporter.step("Get Range Hash from {endpoint}") @@ -356,7 +359,7 @@ def get_range( Returns: (str, bytes) - path to the file with range content and content of this file as bytes """ - range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4())) + test_file = TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4()))) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli.object.range( @@ -364,16 +367,16 @@ def get_range( cid=cid, oid=oid, range=range_cut, - file=range_file_path, + file=test_file, bearer=bearer, xhdr=xhdr, session=session, timeout=timeout, ) - with open(range_file_path, "rb") as file: + with open(test_file, "rb") as file: content = file.read() - return range_file_path, content + return test_file, content @reporter.step("Lock Object") @@ -614,27 +617,27 @@ def head_object( fst_line_idx = result.stdout.find("\n") decoded = json.loads(result.stdout[fst_line_idx:]) + # if response + if "chunks" in decoded.keys(): + logger.info("decoding ec chunks") + return decoded["chunks"] + # If response is Complex Object header, it has `splitId` key if "splitId" in decoded.keys(): - logger.info("decoding split header") return json_utils.decode_split_header(decoded) # If response is Last or Linking Object header, # it has `header` dictionary and non-null `split` dictionary if "split" in decoded["header"].keys(): if decoded["header"]["split"]: - 
logger.info("decoding linking object") return json_utils.decode_linking_object(decoded) if decoded["header"]["objectType"] == "STORAGE_GROUP": - logger.info("decoding storage group") return json_utils.decode_storage_group(decoded) if decoded["header"]["objectType"] == "TOMBSTONE": - logger.info("decoding tombstone") return json_utils.decode_tombstone(decoded) - logger.info("decoding simple header") return json_utils.decode_simple_header(decoded) @@ -688,13 +691,16 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict: latest_block = first_line.split(":") # taking second line from command's output contain wallet key second_line = output.split("\n")[1] - validated_state = second_line.split(":") - return { - latest_block[0].replace(":", ""): int(latest_block[1]), - validated_state[0].replace(":", ""): int(validated_state[1]), - } + if second_line != "": + validated_state = second_line.split(":") + return { + latest_block[0].replace(":", ""): int(latest_block[1]), + validated_state[0].replace(":", ""): int(validated_state[1]), + } + return {latest_block[0].replace(":", ""): int(latest_block[1])} +@wait_for_success() @reporter.step("Search object nodes") def get_object_nodes( cluster: Cluster, @@ -714,21 +720,27 @@ def get_object_nodes( cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config) - result_object_nodes = cli.object.nodes( + response = cli.object.nodes( rpc_endpoint=endpoint, cid=cid, oid=oid, bearer=bearer, ttl=1 if is_direct else None, + json=True, xhdr=xhdr, timeout=timeout, verify_presence_all=verify_presence_all, ) - parsing_output = parse_cmd_table(result_object_nodes.stdout, "|") - list_object_nodes = [ - node for node in parsing_output if node["should_contain_object"] == "true" and node["actually_contains_object"] == "true" - ] + response_json = json.loads(response.stdout) + # Currently, the command will show expected and confirmed nodes. 
+ # And we (currently) count only nodes which are both expected and confirmed + object_nodes_id = { + required_node + for data_object in response_json["data_objects"] + for required_node in data_object["required_nodes"] + if required_node in data_object["confirmed_nodes"] + } netmap_nodes_list = parse_netmap_output( cli.netmap.snapshot( @@ -737,14 +749,14 @@ def get_object_nodes( ).stdout ) netmap_nodes = [ - netmap_node - for object_node in list_object_nodes - for netmap_node in netmap_nodes_list - if object_node["node_id"] == netmap_node.node_id + netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id ] - result = [ - cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip + object_nodes = [ + cluster_node + for netmap_node in netmap_nodes + for cluster_node in cluster.cluster_nodes + if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT) ] - return result + return object_nodes diff --git a/src/frostfs_testlib/steps/cli/tree.py b/src/frostfs_testlib/steps/cli/tree.py new file mode 100644 index 00000000..4b0dfb34 --- /dev/null +++ b/src/frostfs_testlib/steps/cli/tree.py @@ -0,0 +1,35 @@ +import logging +from typing import Optional + +from frostfs_testlib import reporter +from frostfs_testlib.cli import FrostfsCli +from frostfs_testlib.plugins import load_plugin +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC +from frostfs_testlib.shell import Shell +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo + +logger = logging.getLogger("NeoLogger") + + + +@reporter.step("Get Tree List") +def get_tree_list( + wallet: WalletInfo, + cid: str, + shell: Shell, + endpoint: str, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> None: + """ + A wrapper for `frostfs-cli tree list` call. + Args: + wallet (WalletInfo): wallet on whose behalf we list the tree + cid (str): ID of the container whose tree is listed + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + timeout: Timeout for the operation. + This function doesn't return anything. 
+ """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli.tree.list(cid=cid, rpc_endpoint=endpoint, timeout=timeout) diff --git a/src/frostfs_testlib/steps/epoch.py b/src/frostfs_testlib/steps/epoch.py index ce7ed12e..6ec5483a 100644 --- a/src/frostfs_testlib/steps/epoch.py +++ b/src/frostfs_testlib/steps/epoch.py @@ -69,7 +69,7 @@ def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] @reporter.step("Tick Epoch") -def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): +def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None, delta: Optional[int] = None): """ Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv) Args: @@ -88,12 +88,17 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH, ) - frostfs_adm.morph.force_new_epoch() + frostfs_adm.morph.force_new_epoch(delta=delta) return # Otherwise we tick epoch using transaction cur_epoch = get_epoch(shell, cluster) + if delta: + next_epoch = cur_epoch + delta + else: + next_epoch = cur_epoch + 1 + # Use first node by default ir_node = cluster.services(InnerRing)[0] # In case if no local_wallet_path is provided, we use wallet_path @@ -110,7 +115,7 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] wallet_password=ir_wallet_pass, scripthash=get_contract_hash(morph_chain, "netmap.frostfs", shell=shell), method="newEpoch", - arguments=f"int:{cur_epoch + 1}", + arguments=f"int:{next_epoch}", multisig_hash=f"{ir_address}:Global", address=ir_address, rpc_endpoint=morph_endpoint, diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http_gate.py similarity index 80% rename from src/frostfs_testlib/steps/http/http_gate.py rename to src/frostfs_testlib/steps/http_gate.py index 3f4d838d..aa4abf29 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http_gate.py @@ -12,19 +12,18 @@ import requests from frostfs_testlib import reporter from frostfs_testlib.cli import GenericCli -from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE -from frostfs_testlib.s3.aws_cli_client import command_options +from frostfs_testlib.clients.s3.aws_cli_client import command_options +from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE from frostfs_testlib.shell import Shell from frostfs_testlib.shell.local_shell import LocalShell from frostfs_testlib.steps.cli.object import get_object from frostfs_testlib.steps.storage_policy import get_nodes_without_object from frostfs_testlib.storage.cluster import ClusterNode, StorageNode from frostfs_testlib.testing.test_control import retry -from frostfs_testlib.utils.file_utils import get_file_hash +from frostfs_testlib.utils.file_utils import TestFile, get_file_hash logger = logging.getLogger("NeoLogger") -ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/") local_shell = LocalShell() @@ -34,42 +33,44 @@ def get_via_http_gate( oid: str, node: ClusterNode, request_path: Optional[str] = None, + presigned_url: Optional[str] = None, timeout: Optional[int] = 300, ): """ This function gets given object from HTTP gate cid: container id to get object from - oid: object ID + oid: object id / object key node: node to make request request_path: (optional) http request, if ommited - use default [{endpoint}/get/{cid}/{oid}] """ - # if `request_path` parameter omitted, use 
default - if request_path is None: - request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" - else: + request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" + if request_path: request = f"{node.http_gate.get_endpoint()}{request_path}" - resp = requests.get( - request, headers={"Host": node.storage_node.get_http_hostname()[0]}, stream=True, timeout=timeout, verify=False - ) + if presigned_url: + request = presigned_url - if not resp.ok: + response = requests.get(request, stream=True, timeout=timeout, verify=False) + + if not response.ok: raise Exception( f"""Failed to get object via HTTP gate: - request: {resp.request.path_url}, - response: {resp.text}, - headers: {resp.headers}, - status code: {resp.status_code} {resp.reason}""" + request: {response.request.path_url}, + response: {response.text}, + headers: {response.headers}, + status code: {response.status_code} {response.reason}""" ) logger.info(f"Request: {request}") - _attach_allure_step(request, resp.status_code) + _attach_allure_step(request, response.status_code) - file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}") - with open(file_path, "wb") as file: - shutil.copyfileobj(resp.raw, file) - return file_path + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")) + with open(test_file, "wb") as file: + for chunk in response.iter_content(chunk_size=8192): + file.write(chunk) + + return test_file @reporter.step("Get via Zip HTTP Gate") @@ -95,11 +96,11 @@ def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Opt logger.info(f"Request: {request}") _attach_allure_step(request, resp.status_code) - file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip") - with open(file_path, "wb") as file: + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip")) + with open(test_file, "wb") as file: shutil.copyfileobj(resp.raw, file) - with zipfile.ZipFile(file_path, "r") as zip_ref: + with zipfile.ZipFile(test_file, "r") as zip_ref: zip_ref.extractall(ASSETS_DIR) return os.path.join(os.getcwd(), ASSETS_DIR, prefix) @@ -118,20 +119,17 @@ def get_via_http_gate_by_attribute( cid: CID to get object from attribute: attribute {name: attribute} value pair endpoint: http gate endpoint - http_hostname: http host name on the node request_path: (optional) http request path, if ommited - use default [{endpoint}/get_by_attribute/{Key}/{Value}] """ + attr_name = list(attribute.keys())[0] attr_value = quote_plus(str(attribute.get(attr_name))) - # if `request_path` parameter ommited, use default - if request_path is None: - request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" - else: + + request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" + if request_path: request = f"{node.http_gate.get_endpoint()}{request_path}" - resp = requests.get( - request, stream=True, timeout=timeout, verify=False, headers={"Host": node.storage_node.get_http_hostname()[0]} - ) + resp = requests.get(request, stream=True, timeout=timeout, verify=False) if not resp.ok: raise Exception( @@ -145,17 +143,14 @@ def get_via_http_gate_by_attribute( logger.info(f"Request: {request}") _attach_allure_step(request, resp.status_code) - file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}") - with open(file_path, "wb") as file: + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}")) + with open(test_file, "wb") as file: 
shutil.copyfileobj(resp.raw, file) - return file_path + return test_file -# TODO: pass http_hostname as a header @reporter.step("Upload via HTTP Gate") -def upload_via_http_gate( - cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300 -) -> str: +def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300) -> str: """ This function upload given object through HTTP gate cid: CID to get object from @@ -198,7 +193,6 @@ def is_object_large(filepath: str) -> bool: return False -# TODO: pass http_hostname as a header @reporter.step("Upload via HTTP Gate using Curl") def upload_via_http_gate_curl( cid: str, @@ -248,7 +242,7 @@ def upload_via_http_gate_curl( @retry(max_attempts=3, sleep_interval=1) @reporter.step("Get via HTTP Gate using Curl") -def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> str: +def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> TestFile: """ This function gets given object from HTTP gate using curl utility. cid: CID to get object from @@ -256,12 +250,12 @@ def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> str: node: node for request """ request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" - file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}") + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")) curl = GenericCli("curl", node.host) - curl(f'-k -H "Host: {node.storage_node.get_http_hostname()[0]}"', f"{request} > {file_path}", shell=local_shell) + curl(f"-k ", f"{request} > {test_file}", shell=local_shell) - return file_path + return test_file def _attach_allure_step(request: str, status_code: int, req_type="GET"): @@ -367,19 +361,9 @@ def try_to_get_object_via_passed_request_and_expect_error( ) -> None: try: if attrs is None: - get_via_http_gate( - cid=cid, - oid=oid, - node=node, - request_path=http_request_path, - ) + get_via_http_gate(cid, oid, node, http_request_path) else: - get_via_http_gate_by_attribute( - cid=cid, - attribute=attrs, - node=node, - request_path=http_request_path, - ) + get_via_http_gate_by_attribute(cid, attrs, node, http_request_path) raise AssertionError(f"Expected error on getting object with cid: {cid}") except Exception as err: match = error_pattern.casefold() in str(err).casefold() diff --git a/src/frostfs_testlib/steps/metrics.py b/src/frostfs_testlib/steps/metrics.py new file mode 100644 index 00000000..0d0950aa --- /dev/null +++ b/src/frostfs_testlib/steps/metrics.py @@ -0,0 +1,45 @@ +import re + +from frostfs_testlib import reporter +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.testing.test_control import wait_for_success + + +@reporter.step("Check metrics result") +@wait_for_success(max_wait_time=300, interval=10) +def check_metrics_counter( + cluster_nodes: list[ClusterNode], + operator: str = "==", + counter_exp: int = 0, + parse_from_command: bool = False, + **metrics_greps: str, +): + counter_act = 0 + for cluster_node in cluster_nodes: + counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps) + assert eval( + f"{counter_act} {operator} {counter_exp}" + ), f"Actual: {counter_act} {operator} Expected: {counter_exp} in nodes: {cluster_nodes}" + + +@reporter.step("Get metrics value from node: {node}") +def get_metrics_value(node: ClusterNode, parse_from_command: bool = False, **metrics_greps: str): + try: + command_result = 
node.metrics.storage.get_metrics_search_by_greps(**metrics_greps) + if parse_from_command: + metrics_counter = calc_metrics_count_from_stdout(command_result.stdout, **metrics_greps) + else: + metrics_counter = calc_metrics_count_from_stdout(command_result.stdout) + except RuntimeError as e: + metrics_counter = 0 + + return metrics_counter + + +@reporter.step("Parse metrics count and calc sum of result") +def calc_metrics_count_from_stdout(metric_result_stdout: str, command: str = None): + if command: + result = re.findall(rf"{command}\s*([\d.e+-]+)", metric_result_stdout) + else: + result = re.findall(r"}\s*([\d.e+-]+)", metric_result_stdout) + return sum(map(lambda x: int(float(x)), result)) diff --git a/src/frostfs_testlib/steps/network.py b/src/frostfs_testlib/steps/network.py index efaaf5a4..6bde2f19 100644 --- a/src/frostfs_testlib/steps/network.py +++ b/src/frostfs_testlib/steps/network.py @@ -4,16 +4,18 @@ from frostfs_testlib.storage.cluster import ClusterNode class IpHelper: @staticmethod - def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None: + def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[tuple]) -> None: shell = node.host.get_shell() - for ip in block_ip: - shell.exec(f"ip route add blackhole {ip}") + for ip, table in block_ip: + if not table: + shell.exec(f"ip r a blackhole {ip}") + continue + shell.exec(f"ip r a blackhole {ip} table {table}") @staticmethod def restore_input_traffic_to_node(node: ClusterNode) -> None: shell = node.host.get_shell() - unlock_ip = shell.exec("ip route list | grep blackhole", CommandOptions(check=False)) - if unlock_ip.return_code != 0: - return - for ip in unlock_ip.stdout.strip().split("\n"): - shell.exec(f"ip route del blackhole {ip.split(' ')[1]}") + unlock_ip = shell.exec("ip r l table all | grep blackhole", CommandOptions(check=False)).stdout + + for active_blackhole in unlock_ip.strip().split("\n"): + shell.exec(f"ip r d {active_blackhole}") diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py index dd38279d..42b1fc52 100644 --- a/src/frostfs_testlib/steps/node_management.py +++ b/src/frostfs_testlib/steps/node_management.py @@ -13,6 +13,7 @@ from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align from frostfs_testlib.storage.cluster import Cluster, StorageNode +from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils import datetime_utils logger = logging.getLogger("NeoLogger") @@ -111,10 +112,7 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: storage_wallet_path = node.get_wallet_path() cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config) - return cli.netmap.snapshot( - rpc_endpoint=node.get_rpc_endpoint(), - wallet=storage_wallet_path, - ).stdout + return cli.netmap.snapshot(rpc_endpoint=node.get_rpc_endpoint(), wallet=storage_wallet_path).stdout @reporter.step("Get shard list for {node}") @@ -202,12 +200,7 @@ def delete_node_data(node: StorageNode) -> None: @reporter.step("Exclude node {node_to_exclude} from network map") -def exclude_node_from_network_map( - node_to_exclude: StorageNode, - alive_node: StorageNode, - shell: Shell, - cluster: Cluster, -) -> None: +def exclude_node_from_network_map(node_to_exclude: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None: node_netmap_key = 
node_to_exclude.get_wallet_public_key() storage_node_set_status(node_to_exclude, status="offline") @@ -221,12 +214,7 @@ def exclude_node_from_network_map( @reporter.step("Include node {node_to_include} into network map") -def include_node_to_network_map( - node_to_include: StorageNode, - alive_node: StorageNode, - shell: Shell, - cluster: Cluster, -) -> None: +def include_node_to_network_map(node_to_include: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None: storage_node_set_status(node_to_include, status="online") # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch. @@ -236,7 +224,7 @@ def include_node_to_network_map( tick_epoch(shell, cluster) time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2) - check_node_in_map(node_to_include, shell, alive_node) + await_node_in_map(node_to_include, shell, alive_node) @reporter.step("Check node {node} in network map") @@ -250,6 +238,11 @@ def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[Stor assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map" +@wait_for_success(300, 15, title="Await node {node} in network map") +def await_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: + check_node_in_map(node, shell, alive_node) + + @reporter.step("Check node {node} NOT in network map") def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: alive_node = alive_node or node @@ -263,7 +256,7 @@ def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[ @reporter.step("Wait for node {node} is ready") def wait_for_node_to_be_ready(node: StorageNode) -> None: - timeout, attempts = 30, 6 + timeout, attempts = 60, 15 for _ in range(attempts): try: health_check = storage_node_healthcheck(node) @@ -276,12 +269,7 @@ def wait_for_node_to_be_ready(node: StorageNode) -> None: @reporter.step("Remove nodes from network map trough cli-adm morph command") -def remove_nodes_from_map_morph( - shell: Shell, - cluster: Cluster, - remove_nodes: list[StorageNode], - alive_node: Optional[StorageNode] = None, -): +def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: list[StorageNode], alive_node: Optional[StorageNode] = None): """ Move node to the Offline state in the candidates list and tick an epoch to update the netmap using frostfs-adm @@ -300,9 +288,5 @@ def remove_nodes_from_map_morph( if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH: # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests) - frostfsadm = FrostfsAdm( - shell=remote_shell, - frostfs_adm_exec_path=FROSTFS_ADM_EXEC, - config_file=FROSTFS_ADM_CONFIG_PATH, - ) + frostfsadm = FrostfsAdm(shell=remote_shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) frostfsadm.morph.remove_nodes(node_netmap_keys) diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3_helper.py similarity index 76% rename from src/frostfs_testlib/steps/s3/s3_helper.py rename to src/frostfs_testlib/steps/s3_helper.py index baf362be..c3092df7 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3_helper.py @@ -6,9 +6,9 @@ from typing import Optional from dateutil.parser import parse from frostfs_testlib import reporter -from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus +from frostfs_testlib.clients.s3 import 
BucketContainerResolver, S3ClientWrapper, VersioningStatus from frostfs_testlib.shell import Shell -from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container +from frostfs_testlib.steps.cli.container import search_nodes_with_container from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo @@ -47,7 +47,6 @@ def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: Versi if status == VersioningStatus.UNDEFINED: return - s3_client.get_bucket_versioning_status(bucket) s3_client.put_bucket_versioning(bucket, status=status) bucket_status = s3_client.get_bucket_versioning_status(bucket) assert bucket_status == status.value, f"Expected {bucket_status} status. Got {status.value}" @@ -120,32 +119,28 @@ def assert_object_lock_mode( ).days == retain_period, f"Expected retention period is {retain_period} days" -def assert_s3_acl(acl_grants: list, permitted_users: str): - if permitted_users == "AllUsers": - grantees = {"AllUsers": 0, "CanonicalUser": 0} - for acl_grant in acl_grants: - if acl_grant.get("Grantee", {}).get("Type") == "Group": - uri = acl_grant.get("Grantee", {}).get("URI") - permission = acl_grant.get("Permission") - assert (uri, permission) == ( - "http://acs.amazonaws.com/groups/global/AllUsers", - "FULL_CONTROL", - ), "All Groups should have FULL_CONTROL" - grantees["AllUsers"] += 1 - if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser": - permission = acl_grant.get("Permission") - assert permission == "FULL_CONTROL", "Canonical User should have FULL_CONTROL" - grantees["CanonicalUser"] += 1 - assert grantees["AllUsers"] >= 1, "All Users should have FULL_CONTROL" - assert grantees["CanonicalUser"] >= 1, "Canonical User should have FULL_CONTROL" +def _format_grants_as_strings(grants: list[dict]) -> list: + grantee_format = "{g_type}::{uri}:{permission}" + return set( + [ + grantee_format.format( + g_type=grant.get("Grantee", {}).get("Type", ""), + uri=grant.get("Grantee", {}).get("URI", ""), + permission=grant.get("Permission", ""), + ) + for grant in grants + ] + ) - if permitted_users == "CanonicalUser": - for acl_grant in acl_grants: - if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser": - permission = acl_grant.get("Permission") - assert permission == "FULL_CONTROL", "Only CanonicalUser should have FULL_CONTROL" - else: - logger.error("FULL_CONTROL is given to All Users") + +@reporter.step("Verify ACL permissions") +def verify_acl_permissions(actual_acl_grants: list[dict], expected_acl_grants: list[dict], strict: bool = True): + actual_grants = _format_grants_as_strings(actual_acl_grants) + expected_grants = _format_grants_as_strings(expected_acl_grants) + + assert expected_grants <= actual_grants, "Permissions mismatch" + if strict: + assert expected_grants == actual_grants, "Extra permissions found, must not be there" @reporter.step("Delete bucket with all objects") @@ -180,11 +175,35 @@ def search_nodes_with_bucket( wallet: WalletInfo, shell: Shell, endpoint: str, + bucket_container_resolver: BucketContainerResolver, ) -> list[ClusterNode]: cid = None for cluster_node in cluster.cluster_nodes: - cid = search_container_by_name(name=bucket_name, node=cluster_node) + cid = bucket_container_resolver.resolve(cluster_node, bucket_name) if cid: break nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) return nodes_list + + +def get_bytes_relative_to_object(value: int | 
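
# Sketch of the set-based comparison performed by verify_acl_permissions() above;
# the grant dictionaries are invented for illustration.
owner_grant = {"Grantee": {"Type": "CanonicalUser", "URI": ""}, "Permission": "FULL_CONTROL"}
public_grant = {"Grantee": {"Type": "Group", "URI": "http://acs.amazonaws.com/groups/global/AllUsers"}, "Permission": "READ"}

# strict=False: every expected grant must be present, extra grants are tolerated
verify_acl_permissions(actual_acl_grants=[owner_grant, public_grant], expected_acl_grants=[owner_grant], strict=False)
# strict=True (the default) would additionally fail here, because public_grant is an extra permission
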
str, object_size: int = None, part_size: int = None) -> int: + if isinstance(value, int): + return value + + if "part" not in value and "object" not in value: + return int(value) + + if object_size is not None: + value = value.replace("object", str(object_size)) + + if part_size is not None: + value = value.replace("part", str(part_size)) + + return int(eval(value)) + + +def get_range_relative_to_object(rng: str, object_size: int = None, part_size: int = None, int_values: bool = False) -> str | int: + start, end = rng.split(":") + start = get_bytes_relative_to_object(start, object_size, part_size) + end = get_bytes_relative_to_object(end, object_size, part_size) + return (start, end) if int_values else f"bytes {start}-{end}/*" diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 23130cb0..b67e34d1 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -11,6 +11,7 @@ from frostfs_testlib.storage import get_service_registry from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode +from frostfs_testlib.storage.dataclasses.metrics import Metrics from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.service_registry import ServiceRegistry @@ -24,11 +25,13 @@ class ClusterNode: class_registry: ServiceRegistry id: int host: Host + metrics: Metrics def __init__(self, host: Host, id: int) -> None: self.host = host self.id = id self.class_registry = get_service_registry() + self.metrics = Metrics(host=self.host, metrics_endpoint=self.storage_node.get_metrics_endpoint()) @property def host_ip(self): @@ -141,30 +144,16 @@ class ClusterNode: return self.host.config.interfaces[interface.value] def get_data_interfaces(self) -> list[str]: - return [ - ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface - ] + return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface] def get_data_interface(self, search_interface: str) -> list[str]: - return [ - self.host.config.interfaces[interface] - for interface in self.host.config.interfaces.keys() - if search_interface == interface - ] + return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_interface == interface] def get_internal_interfaces(self) -> list[str]: - return [ - ip_address - for name_interface, ip_address in self.host.config.interfaces.items() - if "internal" in name_interface - ] + return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "internal" in name_interface] def get_internal_interface(self, search_internal: str) -> list[str]: - return [ - self.host.config.interfaces[interface] - for interface in self.host.config.interfaces.keys() - if search_internal == interface - ] + return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_internal == interface] class Cluster: @@ -175,8 +164,6 @@ class Cluster: default_rpc_endpoint: str default_s3_gate_endpoint: str default_http_gate_endpoint: str - default_http_hostname: str - default_s3_hostname: str def __init__(self, hosting: 
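
# Illustrative calls for the relative-size helpers above; the numbers are arbitrary.
get_bytes_relative_to_object(1024)                                                   # 1024
get_bytes_relative_to_object("object-1", object_size=4096)                           # 4095, via eval("4096-1")
get_range_relative_to_object("0:part", part_size=512)                                # "bytes 0-512/*"
get_range_relative_to_object("object-10:object", object_size=100, int_values=True)   # (90, 100)
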
Hosting) -> None: self._hosting = hosting @@ -185,8 +172,6 @@ class Cluster: self.default_rpc_endpoint = self.services(StorageNode)[0].get_rpc_endpoint() self.default_s3_gate_endpoint = self.services(S3Gate)[0].get_endpoint() self.default_http_gate_endpoint = self.services(HTTPGate)[0].get_endpoint() - self.default_http_hostname = self.services(StorageNode)[0].get_http_hostname() - self.default_s3_hostname = self.services(StorageNode)[0].get_s3_hostname() @property def hosts(self) -> list[Host]: diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 3d759880..2e492083 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -5,6 +5,7 @@ class ConfigAttributes: WALLET_CONFIG = "wallet_config" CONFIG_DIR = "service_config_dir" CONFIG_PATH = "config_path" + WORKING_DIR = "working_dir" SHARD_CONFIG_PATH = "shard_config_path" LOGGER_CONFIG_PATH = "logger_config_path" LOCAL_WALLET_PATH = "local_wallet_path" @@ -12,9 +13,18 @@ class ConfigAttributes: REMOTE_WALLET_CONFIG = "remote_wallet_config_path" ENDPOINT_DATA_0 = "endpoint_data0" ENDPOINT_DATA_1 = "endpoint_data1" + ENDPOINT_DATA_0_NS = "endpoint_data0_namespace" ENDPOINT_INTERNAL = "endpoint_internal0" ENDPOINT_PROMETHEUS = "endpoint_prometheus" + ENDPOINT_PPROF = "endpoint_pprof" CONTROL_ENDPOINT = "control_endpoint" UN_LOCODE = "un_locode" - HTTP_HOSTNAME = "http_hostname" - S3_HOSTNAME = "s3_hostname" + + +class PlacementRule: + DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" + SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" + REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" + REP_1_FOR_2_NODES_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 2 FROM * AS X" + DEFAULT_EC_PLACEMENT_RULE = "EC 3.1" + EC_1_1_FOR_2_NODES_PLACEMENT_RULE = "EC 1.1 IN X CBF 1 SELECT 2 FROM * AS X" diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index e713f027..56282825 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -187,15 +187,19 @@ class BackgroundLoadController: read_from=self.load_params.read_from, registry_file=self.load_params.registry_file, verify_time=self.load_params.verify_time, + custom_registry=self.load_params.custom_registry, load_type=self.load_params.load_type, load_id=self.load_params.load_id, vu_init_time=0, working_dir=self.load_params.working_dir, endpoint_selection_strategy=self.load_params.endpoint_selection_strategy, k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy, - setup_timeout="1s", + setup_timeout=self.load_params.setup_timeout, ) + if self.verification_params.custom_registry: + self.verification_params.registry_file = self.load_params.custom_registry + if self.verification_params.verify_time is None: raise RuntimeError("verify_time should not be none") diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 03648f5e..51aaefbb 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,6 +1,7 @@ -import datetime +import itertools import logging import time +from datetime import datetime, timezone from typing import TypeVar import 
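
# The PlacementRule presets above are plain policy strings, so a test can parametrize
# over them directly; a hypothetical sketch (test body elided).
import pytest

from frostfs_testlib.storage.constants import PlacementRule

@pytest.mark.parametrize("policy", [PlacementRule.REP_2_FOR_3_NODES_PLACEMENT_RULE, PlacementRule.DEFAULT_EC_PLACEMENT_RULE])
def test_container_placement(policy: str):
    ...  # create a container with this policy and assert replica / EC chunk distribution
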
frostfs_testlib.resources.optionals as optionals @@ -14,10 +15,11 @@ from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_E from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider from frostfs_testlib.steps.network import IpHelper +from frostfs_testlib.steps.node_management import include_node_to_network_map, remove_nodes_from_map_morph from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeStatus +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeStatus from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import parallel from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success @@ -38,7 +40,8 @@ class ClusterStateController: def __init__(self, shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> None: self.stopped_nodes: list[ClusterNode] = [] self.detached_disks: dict[str, DiskController] = {} - self.dropped_traffic: list[ClusterNode] = [] + self.dropped_traffic: set[ClusterNode] = set() + self.excluded_from_netmap: list[StorageNode] = [] self.stopped_services: set[NodeBase] = set() self.cluster = cluster self.healthcheck = healthcheck @@ -170,6 +173,15 @@ class ClusterStateController: if service_type == StorageNode: self.wait_after_storage_startup() + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Send sighup to all {service_type} services") + def sighup_services_of_type(self, service_type: type[ServiceClass]): + services = self.cluster.services(service_type) + parallel([service.send_signal_to_service for service in services], signal="SIGHUP") + + if service_type == StorageNode: + self.wait_after_storage_startup() + @wait_for_success(600, 60) def wait_s3gate(self, s3gate: S3Gate): with reporter.step(f"Wait for {s3gate} reconnection"): @@ -204,21 +216,27 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Stop {service_type} service on {node}") - def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True): + def stop_service_of_type(self, node: ClusterNode, service_type: ServiceClass, mask: bool = True): service = node.service(service_type) service.stop_service(mask) self.stopped_services.add(service) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Send sighup to {service_type} service on {node}") + def sighup_service_of_type(self, node: ClusterNode, service_type: ServiceClass): + service = node.service(service_type) + service.send_signal_to_service("SIGHUP") + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Start {service_type} service on {node}") - def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]): + def start_service_of_type(self, node: ClusterNode, service_type: ServiceClass): service = node.service(service_type) service.start_service() self.stopped_services.discard(service) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Start all stopped {service_type} services") - def start_stopped_services_of_type(self, service_type: type[ServiceClass]): + def start_stopped_services_of_type(self, 
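
# Hypothetical call sites for the new SIGHUP reload path; cluster_state_controller and
# cluster are assumed fixtures.
from frostfs_testlib.storage.cluster import StorageNode

# reload configuration on every storage service without a restart; for StorageNode the
# controller also runs wait_after_storage_startup(), mirroring start_services_of_type()
cluster_state_controller.sighup_services_of_type(StorageNode)

# single-node variant added in the same change
cluster_state_controller.sighup_service_of_type(cluster.cluster_nodes[0], StorageNode)
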
service_type: ServiceClass): stopped_svc = self._get_stopped_by_type(service_type) if not stopped_svc: return @@ -229,23 +247,20 @@ class ClusterStateController: if service_type == StorageNode: self.wait_after_storage_startup() - # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop all storage services on cluster") - def stop_all_storage_services(self, reversed_order: bool = False): - nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + @reporter.step("Restart {service_type} service on {node}") + def restart_service_of_type(self, node: ClusterNode, service_type: ServiceClass): + service = node.service(service_type) + service.restart_service() - for node in nodes: - self.stop_service_of_type(node, StorageNode) - - # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop all S3 gates on cluster") - def stop_all_s3_gates(self, reversed_order: bool = False): - nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + @reporter.step("Restart all {service_type} services") + def restart_services_of_type(self, service_type: type[ServiceClass]): + services = self.cluster.services(service_type) + parallel([service.restart_service for service in services]) - for node in nodes: - self.stop_service_of_type(node, S3Gate) + if service_type == StorageNode: + self.wait_after_storage_startup() # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @@ -259,30 +274,6 @@ class ClusterStateController: def start_storage_service(self, node: ClusterNode): self.start_service_of_type(node, StorageNode) - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start stopped storage services") - def start_stopped_storage_services(self): - self.start_stopped_services_of_type(StorageNode) - - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop s3 gate on {node}") - def stop_s3_gate(self, node: ClusterNode, mask: bool = True): - self.stop_service_of_type(node, S3Gate, mask) - - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start s3 gate on {node}") - def start_s3_gate(self, node: ClusterNode): - self.start_service_of_type(node, S3Gate) - - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start stopped S3 gates") - def start_stopped_s3_gates(self): - self.start_stopped_services_of_type(S3Gate) - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Suspend {process_name} service in {node}") def suspend_service(self, process_name: str, node: ClusterNode): @@ -307,30 +298,23 @@ class ClusterStateController: self.suspended_services = {} @reporter.step("Drop traffic to {node}, nodes - {block_nodes}") - def drop_traffic( - self, - node: ClusterNode, - wakeup_timeout: int, - name_interface: str, - block_nodes: list[ClusterNode] = None, - ) -> None: - list_ip = self._parse_interfaces(block_nodes, name_interface) - IpHelper.drop_input_traffic_to_node(node, list_ip) + def drop_traffic(self, node: ClusterNode, wakeup_timeout: int, name_interface: str, block_nodes: list[ClusterNode] = None) -> None: + interfaces_tables = self._parse_interfaces(block_nodes, name_interface) + IpHelper.drop_input_traffic_to_node(node, interfaces_tables) time.sleep(wakeup_timeout) - self.dropped_traffic.append(node) + self.dropped_traffic.add(node) @reporter.step("Start traffic to {node}") - def 
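
# Hypothetical migration from the removed per-service helpers to the generic typed
# calls that remain (node and cluster_state_controller are assumed fixtures).
from frostfs_testlib.storage.cluster import S3Gate, StorageNode

cluster_state_controller.stop_service_of_type(node, S3Gate)       # was stop_s3_gate(node)
cluster_state_controller.start_stopped_services_of_type(S3Gate)   # was start_stopped_s3_gates()
cluster_state_controller.restart_services_of_type(StorageNode)    # new: restart all, then wait_after_storage_startup()
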
restore_traffic( - self, - node: ClusterNode, - ) -> None: + def restore_traffic(self, node: ClusterNode) -> None: IpHelper.restore_input_traffic_to_node(node=node) + self.dropped_traffic.discard(node) @reporter.step("Restore blocked nodes") def restore_all_traffic(self): if not self.dropped_traffic: return parallel(self._restore_traffic_to_node, self.dropped_traffic) + self.dropped_traffic.clear() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Hard reboot host {node} via magic SysRq option") @@ -379,38 +363,38 @@ class ClusterStateController: @reporter.step("Get node time") def get_node_date(self, node: ClusterNode) -> datetime: shell = node.host.get_shell() - return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z") + return datetime.strptime(shell.exec('date +"%Y-%m-%d %H:%M:%S"').stdout.strip(), "%Y-%m-%d %H:%M:%S") - @reporter.step("Set node time to {in_date}") + @reporter.step("Set time on nodes in {in_date}") + def change_date_on_all_nodes(self, cluster: Cluster, in_date: datetime) -> None: + parallel(self.change_node_date, cluster.cluster_nodes, in_date=in_date) + + @reporter.step("Set time on {node} to {in_date}") def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: shell = node.host.get_shell() - shell.exec(f"date -s @{time.mktime(in_date.timetuple())}") - shell.exec("hwclock --systohc") + in_date_frmt = in_date.strftime("%Y-%m-%d %H:%M:%S") + shell.exec(f"timedatectl set-time '{in_date_frmt}'") node_time = self.get_node_date(node) - with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): - assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1) - @reporter.step(f"Restore time") + with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): + assert (node_time - in_date).total_seconds() < 60 + + @reporter.step("Restore time on nodes") + def restore_date_on_all_nodes(self, cluster: Cluster) -> None: + parallel(self.restore_node_date, cluster.cluster_nodes) + + @reporter.step("Restore time on {node}") def restore_node_date(self, node: ClusterNode) -> None: shell = node.host.get_shell() - now_time = datetime.datetime.now(datetime.timezone.utc) - with reporter.step(f"Set {now_time} time"): - shell.exec(f"date -s @{time.mktime(now_time.timetuple())}") - shell.exec("hwclock --systohc") + now_time = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S") - @reporter.step("Change the synchronizer status to {status}") - def set_sync_date_all_nodes(self, status: str): - if status == "active": - parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes) - return - parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes) + with reporter.step(f"Set {now_time} time"): + shell.exec(f"timedatectl set-time '{now_time}'") @reporter.step("Set MaintenanceModeAllowed - {status}") def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: frostfs_adm = FrostfsAdm( - shell=cluster_node.host.get_shell(), - frostfs_adm_exec_path=FROSTFS_ADM_EXEC, - config_file=FROSTFS_ADM_CONFIG_PATH, + shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH ) frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}") @@ -432,22 +416,46 @@ class ClusterStateController: if not await_tick: return - with reporter.step("Tick 1 epoch and await 2 block"): - frostfs_adm.morph.force_new_epoch() - 
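
# A hedged usage sketch for the timedatectl-based clock helpers above; the offset is arbitrary
# and the controller/cluster objects come from fixtures.
from datetime import datetime, timedelta

future = datetime.now() + timedelta(days=30)
cluster_state_controller.change_date_on_all_nodes(cluster, in_date=future)
try:
    ...  # exercise the time-dependent behaviour
finally:
    cluster_state_controller.restore_date_on_all_nodes(cluster)
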
time.sleep(parse_time(MORPH_BLOCK_TIME) * 2) + with reporter.step("Tick 2 epoch with 2 block await."): + for _ in range(2): + frostfs_adm.morph.force_new_epoch() + time.sleep(parse_time(MORPH_BLOCK_TIME) * 2) self.await_node_status(status, wallet, cluster_node) @wait_for_success(80, 8, title="Wait for node status become {status}") - def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode): + def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode, checker_node: ClusterNode = None): frostfs_cli = FrostfsCli(self.shell, FROSTFS_CLI_EXEC, wallet.config_path) - netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(cluster_node.storage_node.get_rpc_endpoint()).stdout) - netmap = [node for node in netmap if cluster_node.host_ip == node.node] + if not checker_node: + checker_node = cluster_node + netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(checker_node.storage_node.get_rpc_endpoint()).stdout) + netmap = [node for node in netmap if cluster_node.get_interface(Interfaces.MGMT) == node.node] if status == NodeStatus.OFFLINE: - assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline" + assert ( + cluster_node.get_interface(Interfaces.MGMT) not in netmap + ), f"{cluster_node.get_interface(Interfaces.MGMT)} not in Offline" else: assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'" + def remove_node_from_netmap(self, removes_nodes: list[StorageNode]) -> None: + alive_storage = list(set(self.cluster.storage_nodes) - set(removes_nodes))[0] + remove_nodes_from_map_morph(self.shell, self.cluster, removes_nodes, alive_storage) + self.excluded_from_netmap.extend(removes_nodes) + + def include_node_to_netmap(self, include_node: StorageNode, alive_node: StorageNode): + include_node_to_network_map(include_node, alive_node, self.shell, self.cluster) + self.excluded_from_netmap.pop(self.excluded_from_netmap.index(include_node)) + + def include_all_excluded_nodes(self): + if not self.excluded_from_netmap: + return + alive_node = list(set(self.cluster.storage_nodes) - set(self.excluded_from_netmap))[0] + if not alive_node: + return + + for exclude_node in self.excluded_from_netmap.copy(): + self.include_node_to_netmap(exclude_node, alive_node) + def _get_cli( self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode ) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]: @@ -464,23 +472,9 @@ class ClusterStateController: frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) frostfs_cli = FrostfsCli(local_shell, FROSTFS_CLI_EXEC, local_wallet.config_path) - frostfs_cli_remote = FrostfsCli( - shell=shell, - frostfs_cli_exec_path=FROSTFS_CLI_EXEC, - config_file=wallet_config_path, - ) + frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path) return frostfs_adm, frostfs_cli, frostfs_cli_remote - def _enable_date_synchronizer(self, cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() - shell.exec("timedatectl set-ntp true") - cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 15) - - def _disable_date_synchronizer(self, cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() - shell.exec("timedatectl set-ntp false") - cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 15) - def _get_disk_controller(self, 
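
# Sketch of the new netmap bookkeeping above; node objects come from the cluster fixture.
node_under_test = cluster.storage_nodes[0]

cluster_state_controller.remove_node_from_netmap([node_under_test])
# ... verify behaviour with the node excluded ...
cluster_state_controller.include_all_excluded_nodes()  # re-adds every node tracked in excluded_from_netmap
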
node: StorageNode, device: str, mountpoint: str) -> DiskController: disk_controller_id = DiskController.get_id(node, device) if disk_controller_id in self.detached_disks.keys(): @@ -490,17 +484,31 @@ class ClusterStateController: return disk_controller + @reporter.step("Restore traffic {node}") def _restore_traffic_to_node(self, node): IpHelper.restore_input_traffic_to_node(node) - def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str): - interfaces = [] + def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str) -> list[tuple]: + interfaces_and_tables = set() for node in nodes: - dict_interfaces = node.host.config.interfaces - for type, ip in dict_interfaces.items(): - if name_interface in type: - interfaces.append(ip) - return interfaces + shell = node.host.get_shell() + lines = shell.exec(f"ip r l table all | grep '{name_interface}'").stdout.splitlines() + + ips = [] + tables = [] + + for line in lines: + if "src" not in line or "table local" in line: + continue + parts = line.split() + ips.append(parts[-1]) + if "table" in line: + tables.append(parts[parts.index("table") + 1]) + tables.append(None) + + [interfaces_and_tables.add((ip, table)) for ip, table in itertools.product(ips, tables)] + + return interfaces_and_tables @reporter.step("Ping node") def _ping_host(self, node: ClusterNode): @@ -528,3 +536,8 @@ class ClusterStateController: except Exception as err: logger.warning(f"Host ping fails with error {err}") return HostStatus.ONLINE + + @reporter.step("Get contract by domain - {domain_name}") + def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str): + frostfs_adm = FrostfsAdm(shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC) + return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_http_endpoint(), domain_name).stdout diff --git a/src/frostfs_testlib/storage/controllers/shards_watcher.py b/src/frostfs_testlib/storage/controllers/shards_watcher.py index 3d313f18..50174066 100644 --- a/src/frostfs_testlib/storage/controllers/shards_watcher.py +++ b/src/frostfs_testlib/storage/controllers/shards_watcher.py @@ -2,22 +2,22 @@ import json from typing import Any from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards +from frostfs_testlib.shell.interfaces import CommandResult from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.testing.test_control import wait_for_success class ShardsWatcher: - shards_snapshots: list[dict[str, Any]] = [] - def __init__(self, node_under_test: ClusterNode) -> None: + self.shards_snapshots: list[dict[str, Any]] = [] self.storage_node = node_under_test.storage_node self.take_shards_snapshot() - def take_shards_snapshot(self): + def take_shards_snapshot(self) -> None: snapshot = self.get_shards_snapshot() self.shards_snapshots.append(snapshot) - def get_shards_snapshot(self): + def get_shards_snapshot(self) -> dict[str, Any]: shards_snapshot: dict[str, Any] = {} shards = self.get_shards() @@ -26,17 +26,17 @@ class ShardsWatcher: return shards_snapshot - def _get_current_snapshot(self): + def _get_current_snapshot(self) -> dict[str, Any]: return self.shards_snapshots[-1] - def _get_previous_snapshot(self): + def _get_previous_snapshot(self) -> dict[str, Any]: return self.shards_snapshots[-2] - def _is_shard_present(self, shard_id): + def _is_shard_present(self, shard_id) -> bool: snapshot = self._get_current_snapshot() return shard_id in snapshot - def get_shards_with_new_errors(self): + def get_shards_with_new_errors(self) -> 
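
# What _parse_interfaces() now hands to IpHelper and the routes that result;
# the addresses and routing-table id are invented.
interfaces_tables = {("192.168.1.10", None), ("10.78.70.10", "1000")}
for ip, table in interfaces_tables:
    print(f"ip r a blackhole {ip}" if not table else f"ip r a blackhole {ip} table {table}")
# ip r a blackhole 192.168.1.10
# ip r a blackhole 10.78.70.10 table 1000
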
dict[str, Any]: current_snapshot = self._get_current_snapshot() previous_snapshot = self._get_previous_snapshot() shards_with_new_errors: dict[str, Any] = {} @@ -46,7 +46,7 @@ class ShardsWatcher: return shards_with_new_errors - def get_shards_with_errors(self): + def get_shards_with_errors(self) -> dict[str, Any]: snapshot = self.get_shards_snapshot() shards_with_errors: dict[str, Any] = {} for shard_id, shard in snapshot.items(): @@ -55,7 +55,7 @@ class ShardsWatcher: return shards_with_errors - def get_shard_status(self, shard_id: str): + def get_shard_status(self, shard_id: str): # -> Any: snapshot = self.get_shards_snapshot() assert shard_id in snapshot, f"Shard {shard_id} is missing: {snapshot}" @@ -63,18 +63,18 @@ class ShardsWatcher: return snapshot[shard_id]["mode"] @wait_for_success(60, 2) - def await_for_all_shards_status(self, status: str): + def await_for_all_shards_status(self, status: str) -> None: snapshot = self.get_shards_snapshot() for shard_id in snapshot: assert snapshot[shard_id]["mode"] == status, f"Shard {shard_id} have wrong shard status" @wait_for_success(60, 2) - def await_for_shard_status(self, shard_id: str, status: str): + def await_for_shard_status(self, shard_id: str, status: str) -> None: assert self.get_shard_status(shard_id) == status @wait_for_success(60, 2) - def await_for_shard_have_new_errors(self, shard_id: str): + def await_for_shard_have_new_errors(self, shard_id: str) -> None: self.take_shards_snapshot() assert self._is_shard_present(shard_id) shards_with_new_errors = self.get_shards_with_new_errors() @@ -82,7 +82,7 @@ class ShardsWatcher: assert shard_id in shards_with_new_errors, f"Expected shard {shard_id} to have new errors, but haven't {self.shards_snapshots[-1]}" @wait_for_success(300, 5) - def await_for_shards_have_no_new_errors(self): + def await_for_shards_have_no_new_errors(self) -> None: self.take_shards_snapshot() shards_with_new_errors = self.get_shards_with_new_errors() assert len(shards_with_new_errors) == 0 @@ -102,7 +102,7 @@ class ShardsWatcher: return json.loads(response.stdout.split(">", 1)[1]) - def set_shard_mode(self, shard_id: str, mode: str, clear_errors: bool = True): + def set_shard_mode(self, shard_id: str, mode: str, clear_errors: bool = True) -> CommandResult: shards_cli = FrostfsCliShards( self.storage_node.host.get_shell(), self.storage_node.host.get_cli_config("frostfs-cli").exec_path, diff --git a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py index 66f72d6d..f0b2a215 100644 --- a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py +++ b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py @@ -14,14 +14,19 @@ class ConfigStateManager(StateManager): self.cluster = self.csc.cluster @reporter.step("Change configuration for {service_type} on all nodes") - def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any]): + def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any], sighup: bool = False): services = self.cluster.services(service_type) nodes = self.cluster.nodes(services) self.services_with_changed_config.update([(node, service_type) for node in nodes]) - self.csc.stop_services_of_type(service_type) + if not sighup: + self.csc.stop_services_of_type(service_type) + parallel([node.config(service_type).set for node in nodes], values=values) - self.csc.start_services_of_type(service_type) + if not 
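
# Hypothetical ShardsWatcher flow using the methods annotated above; the shard mode
# string is illustrative and node_under_test is an assumed fixture.
from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher

watcher = ShardsWatcher(node_under_test)              # takes the first snapshot on init
shard_id = next(iter(watcher.get_shards_snapshot()))  # snapshots are keyed by shard id
watcher.set_shard_mode(shard_id, mode="read-only")
watcher.await_for_shard_status(shard_id, "read-only")
watcher.await_for_shards_have_no_new_errors()
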
sighup: + self.csc.start_services_of_type(service_type) + else: + self.csc.sighup_services_of_type(service_type) @reporter.step("Change configuration for {service_type} on {node}") def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]): @@ -32,18 +37,26 @@ class ConfigStateManager(StateManager): self.csc.start_service_of_type(node, service_type) @reporter.step("Revert all configuration changes") - def revert_all(self): + def revert_all(self, sighup: bool = False): if not self.services_with_changed_config: return - parallel(self._revert_svc, self.services_with_changed_config) + parallel(self._revert_svc, self.services_with_changed_config, sighup) self.services_with_changed_config.clear() - self.csc.start_all_stopped_services() + if not sighup: + self.csc.start_all_stopped_services() # TODO: parallel can't have multiple parallel_items :( @reporter.step("Revert all configuration {node_and_service}") - def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]): + def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass], sighup: bool = False): node, service_type = node_and_service - self.csc.stop_service_of_type(node, service_type) + service = node.service(service_type) + + if not sighup: + self.csc.stop_service_of_type(node, service_type) + node.config(service_type).revert() + + if sighup: + service.send_signal_to_service("SIGHUP") diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py new file mode 100644 index 00000000..11994356 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -0,0 +1,154 @@ +import logging +from dataclasses import dataclass +from enum import Enum +from typing import Optional + +from frostfs_testlib.testing.readable import HumanReadableEnum +from frostfs_testlib.utils import string_utils + +logger = logging.getLogger("NeoLogger") +EACL_LIFETIME = 100500 +FROSTFS_CONTRACT_CACHE_TIMEOUT = 30 + + +class ObjectOperations(HumanReadableEnum): + PUT = "object.put" + PATCH = "object.patch" + GET = "object.get" + HEAD = "object.head" + GET_RANGE = "object.range" + GET_RANGE_HASH = "object.hash" + SEARCH = "object.search" + DELETE = "object.delete" + WILDCARD_ALL = "object.*" + + @staticmethod + def get_all(): + return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] + + +class ContainerOperations(HumanReadableEnum): + PUT = "container.put" + GET = "container.get" + LIST = "container.list" + DELETE = "container.delete" + WILDCARD_ALL = "container.*" + + @staticmethod + def get_all(): + return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] + + +@dataclass +class Operations: + GET_CONTAINER = "GetContainer" + PUT_CONTAINER = "PutContainer" + DELETE_CONTAINER = "DeleteContainer" + LIST_CONTAINER = "ListContainers" + GET_OBJECT = "GetObject" + DELETE_OBJECT = "DeleteObject" + HASH_OBJECT = "HashObject" + RANGE_OBJECT = "RangeObject" + SEARCH_OBJECT = "SearchObject" + HEAD_OBJECT = "HeadObject" + PUT_OBJECT = "PutObject" + PATCH_OBJECT = "PatchObject" + + +class Verb(HumanReadableEnum): + ALLOW = "allow" + DENY = "deny" + + +class Role(HumanReadableEnum): + OWNER = "owner" + IR = "ir" + CONTAINER = "container" + OTHERS = "others" + + +class ConditionType(HumanReadableEnum): + RESOURCE = "ResourceCondition" + REQUEST = "RequestCondition" + + +# See https://git.frostfs.info/TrueCloudLab/policy-engine/src/branch/master/schema/native/consts.go#L40-L53 +class ConditionKey(HumanReadableEnum): + ROLE = 
'"\\$Actor:role"' + PUBLIC_KEY = '"\\$Actor:publicKey"' + OBJECT_TYPE = '"\\$Object:objectType"' + OBJECT_ID = '"\\$Object:objectID"' + + +class MatchType(HumanReadableEnum): + EQUAL = "=" + NOT_EQUAL = "!=" + + +@dataclass +class Condition: + condition_key: ConditionKey | str + condition_value: str + condition_type: ConditionType = ConditionType.REQUEST + match_type: MatchType = MatchType.EQUAL + + def as_string(self): + key = self.condition_key.value if isinstance(self.condition_key, ConditionKey) else self.condition_key + value = self.condition_value.value if isinstance(self.condition_value, Enum) else self.condition_value + + return f"{self.condition_type.value}:{key}{self.match_type.value}{value}" + + @staticmethod + def by_role(*args, **kwargs) -> "Condition": + return Condition(ConditionKey.ROLE, *args, **kwargs) + + @staticmethod + def by_key(*args, **kwargs) -> "Condition": + return Condition(ConditionKey.PUBLIC_KEY, *args, **kwargs) + + @staticmethod + def by_object_type(*args, **kwargs) -> "Condition": + return Condition(ConditionKey.OBJECT_TYPE, *args, **kwargs) + + @staticmethod + def by_object_id(*args, **kwargs) -> "Condition": + return Condition(ConditionKey.OBJECT_ID, *args, **kwargs) + + +class Rule: + def __init__( + self, + access: Verb, + operations: list[ObjectOperations] | ObjectOperations, + conditions: list[Condition] | Condition = None, + chain_id: Optional[str] = None, + ) -> None: + self.access = access + self.operations = operations + + if not conditions: + self.conditions = [] + elif isinstance(conditions, Condition): + self.conditions = [conditions] + else: + self.conditions = conditions + + if not isinstance(self.conditions, list): + raise RuntimeError("Conditions must be a list") + + if not operations: + self.operations = [] + elif isinstance(operations, (ObjectOperations, ContainerOperations)): + self.operations = [operations] + else: + self.operations = operations + + if not isinstance(self.operations, list): + raise RuntimeError("Operations must be a list") + + self.chain_id = chain_id if chain_id else string_utils.unique_name("chain-id-") + + def as_string(self): + conditions = " ".join([cond.as_string() for cond in self.conditions]) + operations = " ".join([op.value for op in self.operations]) + return f"{self.access.value} {operations} {conditions} *" diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 9e671d5e..4f5c3489 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -39,12 +39,18 @@ class S3Gate(NodeBase): def get_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0) + def get_ns_endpoint(self, ns_name: str) -> str: + return self._get_attribute(f"{ConfigAttributes.ENDPOINT_DATA_0}_namespace").format(namespace=ns_name) + def get_all_endpoints(self) -> list[str]: return [ self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0), self._get_attribute(ConfigAttributes.ENDPOINT_DATA_1), ] + def get_ns_endpoint(self, ns_name: str) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0_NS).format(namespace=ns_name) + def service_healthcheck(self) -> bool: health_metric = "frostfs_s3_gw_state_health" output = self.host.get_shell().exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d").stdout @@ -154,15 +160,6 @@ class StorageNode(NodeBase): def get_data_directory(self) -> str: return self.host.get_data_directory(self.name) - 
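
# Illustrative APE rule built from the dataclasses above; the chain id is generated
# automatically when omitted.
rule = Rule(
    access=Verb.ALLOW,
    operations=ObjectOperations.GET,
    conditions=Condition.by_role(Role.OTHERS),
)
rule.as_string()
# 'allow object.get RequestCondition:"\$Actor:role"=others *'
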
def get_storage_config(self) -> str: - return self.host.get_storage_config(self.name) - - def get_http_hostname(self) -> list[str]: - return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME) - - def get_s3_hostname(self) -> list[str]: - return self._get_attribute(ConfigAttributes.S3_HOSTNAME) - def delete_blobovnicza(self): self.host.delete_blobovnicza(self.name) diff --git a/src/frostfs_testlib/storage/dataclasses/metrics.py b/src/frostfs_testlib/storage/dataclasses/metrics.py new file mode 100644 index 00000000..89690151 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/metrics.py @@ -0,0 +1,80 @@ +import time +from functools import wraps +from typing import Callable + +import pytest + +from frostfs_testlib.hosting import Host +from frostfs_testlib.shell.interfaces import CommandResult + + +class Metrics: + def __init__(self, host: Host, metrics_endpoint: str) -> None: + self.storage = StorageMetrics(host, metrics_endpoint) + + +class StorageMetrics: + """ + Class represents storage metrics in a cluster + """ + + def __init__(self, host: Host, metrics_endpoint: str) -> None: + self.host = host + self.metrics_endpoint = metrics_endpoint + + def get_metrics_search_by_greps(self, **greps) -> CommandResult: + """ + Get a metrics, search by: cid, metric_type, shard_id etc. + Args: + greps: dict of grep-command-name and value + for example get_metrics_search_by_greps(command='container_objects_total', cid='123456') + Return: + result of metrics + """ + shell = self.host.get_shell() + additional_greps = " |grep ".join([grep_command for grep_command in greps.values()]) + result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}") + return result + + def get_all_metrics(self) -> CommandResult: + shell = self.host.get_shell() + result = shell.exec(f"curl -s {self.metrics_endpoint}") + return result + + +def wait_until_metric_result_is_stable( + relative_deviation: float = None, absolute_deviation: int = None, max_attempts: int = 10, sleep_interval: int = 30 +): + """ + A decorator function that repeatedly calls the decorated function until its result stabilizes + within a specified relative tolerance or until the maximum number of attempts is reached. + + This decorator is useful for scenarios where a function returns a metric or value that may fluctuate + over time, and you want to ensure that the result has stabilized before proceeding. 
+ """ + + def decorator(func: Callable): + @wraps(func) + def wrapper(*args, **kwargs): + last_result = None + for _ in range(max_attempts): + # first function call + first_result = func(*args, **kwargs) + + # waiting before the second call + time.sleep(sleep_interval) + + # second function call + last_result = func(*args, **kwargs) + + # checking value stability + if first_result == pytest.approx(last_result, rel=relative_deviation, abs=absolute_deviation): + return last_result + + # if stability is not achieved, return the last value + if last_result is not None: + return last_result + + return wrapper + + return decorator diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 82913452..5c8b7233 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -65,6 +65,10 @@ class NodeBase(HumanReadableABC): with reporter.step(f"Start {self.name} service on {self.host.config.address}"): self.host.start_service(self.name) + def send_signal_to_service(self, signal: str): + with reporter.step(f"Send -{signal} signal to {self.name} service on {self.host.config.address}"): + self.host.send_signal_to_service(self.name, signal) + @abstractmethod def service_healthcheck(self) -> bool: """Service healthcheck.""" @@ -78,6 +82,9 @@ class NodeBase(HumanReadableABC): def get_metrics_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS) + def get_pprof_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_PPROF) + def stop_service(self, mask: bool = True): if mask: with reporter.step(f"Mask {self.name} service on {self.host.config.address}"): @@ -140,6 +147,13 @@ class NodeBase(HumanReadableABC): else None ) + def get_working_dir_path(self) -> Optional[str]: + """ + Returns working directory path located on remote host + """ + config_attributes = self.host.get_service_config(self.name) + return self._get_attribute(ConfigAttributes.WORKING_DIR) if ConfigAttributes.WORKING_DIR in config_attributes.attributes else None + @property def config_dir(self) -> str: return self._get_attribute(ConfigAttributes.CONFIG_DIR) @@ -185,9 +199,7 @@ class NodeBase(HumanReadableABC): if attribute_name not in config.attributes: if default_attribute_name is None: - raise RuntimeError( - f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either" - ) + raise RuntimeError(f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either") return config.attributes[default_attribute_name] @@ -197,9 +209,7 @@ class NodeBase(HumanReadableABC): return self.host.get_service_config(self.name) def get_service_uptime(self, service: str) -> datetime: - result = self.host.get_shell().exec( - f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2" - ) + result = self.host.get_shell().exec(f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2") start_time = parser.parse(result.stdout.strip()) current_time = datetime.now(tz=timezone.utc) active_time = current_time - start_time diff --git a/src/frostfs_testlib/storage/dataclasses/policy.py b/src/frostfs_testlib/storage/dataclasses/policy.py new file mode 100644 index 00000000..872ee05e --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/policy.py @@ -0,0 +1,13 @@ +from dataclasses import dataclass + + +@dataclass +class PlacementPolicy: + name: str + value: str + + def __str__(self) 
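
# One hedged way to combine the pieces above: sample a container counter twice,
# 30 s apart, and accept it only once both reads agree. The metric name is invented
# and the steps-module import path is assumed from the hunks above.
from frostfs_testlib.steps.metrics import calc_metrics_count_from_stdout
from frostfs_testlib.storage.dataclasses.metrics import wait_until_metric_result_is_stable

@wait_until_metric_result_is_stable(relative_deviation=0.01)
def container_objects_total(node, cid: str) -> int:
    result = node.metrics.storage.get_metrics_search_by_greps(command="frostfs_node_engine_container_objects_total", cid=cid)
    return calc_metrics_count_from_stdout(result.stdout, command="frostfs_node_engine_container_objects_total")
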
-> str: + return self.name + + def __repr__(self) -> str: + return self.__str__() diff --git a/src/frostfs_testlib/storage/dataclasses/shard.py b/src/frostfs_testlib/storage/dataclasses/shard.py index 170a477b..bebdbf5f 100644 --- a/src/frostfs_testlib/storage/dataclasses/shard.py +++ b/src/frostfs_testlib/storage/dataclasses/shard.py @@ -56,9 +56,7 @@ class Shard: var_prefix = f"{SHARD_PREFIX}{shard_id}" blobstor_count = Shard._get_blobstor_count_from_section(config_object, shard_id) - blobstors = [ - Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count) - ] + blobstors = [Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count)] write_cache_enabled = config_object.as_bool(f"{var_prefix}_WRITECACHE_ENABLED") @@ -71,7 +69,13 @@ class Shard: @staticmethod def from_object(shard): metabase = shard["metabase"]["path"] if "path" in shard["metabase"] else shard["metabase"] + writecache_enabled = True + if "enabled" in shard["writecache"]: + writecache_enabled = shard["writecache"]["enabled"] + writecache = shard["writecache"]["path"] if "path" in shard["writecache"] else shard["writecache"] + if not writecache_enabled: + writecache = "" # Currently due to issue we need to check if pilorama exists in keys # TODO: make pilorama mandatory after fix diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index 28fdaa52..4c303fcd 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -1,6 +1,9 @@ +import re from dataclasses import dataclass from typing import Optional +from pydantic import BaseModel, Field, field_validator + from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.readable import HumanReadableEnum @@ -70,8 +73,55 @@ class NodeNetInfo: epoch_duration: str = None inner_ring_candidate_fee: str = None maximum_object_size: str = None + maximum_count_of_data_shards: str = None + maximum_count_of_parity_shards: str = None withdrawal_fee: str = None homomorphic_hashing_disabled: str = None maintenance_mode_allowed: str = None - eigen_trust_alpha: str = None - eigen_trust_iterations: str = None + + +class Attributes(BaseModel): + cluster_name: str = Field(alias="ClusterName") + continent: str = Field(alias="Continent") + country: str = Field(alias="Country") + country_code: str = Field(alias="CountryCode") + external_addr: list[str] = Field(alias="ExternalAddr") + location: str = Field(alias="Location") + node: str = Field(alias="Node") + subdiv: str = Field(alias="SubDiv") + subdiv_code: str = Field(alias="SubDivCode") + un_locode: str = Field(alias="UN-LOCODE") + role: str = Field(alias="role") + + @field_validator("external_addr", mode="before") + @classmethod + def convert_external_addr(cls, value: str) -> list[str]: + return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", value)] + + +class NodeInfo(BaseModel): + public_key: str = Field(alias="publicKey") + addresses: list[str] = Field(alias="addresses") + state: str = Field(alias="state") + attributes: Attributes = Field(alias="attributes") + + @field_validator("addresses", mode="before") + @classmethod + def convert_external_addr(cls, value: str) -> list[str]: + return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", ",".join(value))] + + +@dataclass +class 
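
# Self-contained sketch of what the convert_external_addr validators above do with
# multiaddr strings; the addresses are invented.
import re

addresses = ["/ip4/10.78.128.10/tcp/8080", "/ip4/10.78.128.10/tls/8082"]
[f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", ",".join(addresses))]
# ['10.78.128.10:8080', '10.78.128.10:8082']
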
Chunk: + def __init__(self, object_id: str, required_nodes: list, confirmed_nodes: list, ec_parent_object_id: str, ec_index: int) -> None: + self.object_id = object_id + self.required_nodes = required_nodes + self.confirmed_nodes = confirmed_nodes + self.ec_parent_object_id = ec_parent_object_id + self.ec_index = ec_index + + def __str__(self) -> str: + return self.object_id + + def __repr__(self) -> str: + return self.object_id diff --git a/src/frostfs_testlib/steps/http/__init__.py b/src/frostfs_testlib/storage/grpc_operations/__init__.py similarity index 100% rename from src/frostfs_testlib/steps/http/__init__.py rename to src/frostfs_testlib/storage/grpc_operations/__init__.py diff --git a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py new file mode 100644 index 00000000..c1e3a310 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py @@ -0,0 +1,14 @@ +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.storage.grpc_operations import implementations, interfaces, interfaces_wrapper + + +class CliClientWrapper(interfaces_wrapper.GrpcClientWrapper): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + self.object: interfaces.ObjectInterface = implementations.ObjectOperations(self.cli) + self.container: interfaces.ContainerInterface = implementations.ContainerOperations(self.cli) + self.netmap: interfaces.NetmapInterface = implementations.NetmapOperations(self.cli) + + +class RpcClientWrapper(interfaces_wrapper.GrpcClientWrapper): + pass # The next series diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py new file mode 100644 index 00000000..18e8ae58 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py @@ -0,0 +1,4 @@ +from .chunks import ChunksOperations +from .container import ContainerOperations +from .netmap import NetmapOperations +from .object import ObjectOperations diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py new file mode 100644 index 00000000..0d787e24 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -0,0 +1,165 @@ +import json +from typing import Optional + +from frostfs_testlib import reporter +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher +from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, Interfaces, NodeNetmapInfo +from frostfs_testlib.storage.grpc_operations import interfaces +from frostfs_testlib.testing.test_control import wait_for_success +from frostfs_testlib.utils.cli_utils import parse_netmap_output + + +class ChunksOperations(interfaces.ChunksInterface): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + + @reporter.step("Search node without chunks") + def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: + if not endpoint: + endpoint = cluster.default_rpc_endpoint + netmap = parse_netmap_output(self.cli.netmap.snapshot(endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout) + chunks_node_key = [] + for chunk in 
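
# Hypothetical wiring of the wrapper above: one FrostfsCli instance fans out into the
# typed sub-interfaces; shell, wallet_config_path and cluster are assumed fixtures,
# and the policy string is only an example.
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
from frostfs_testlib.storage.grpc_operations.client_wrappers import CliClientWrapper

cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=wallet_config_path)
grpc_client = CliClientWrapper(cli)

cid = grpc_client.container.create(endpoint=cluster.default_rpc_endpoint, policy="REP 2 IN X CBF 1 SELECT 4 FROM * AS X", await_mode=True)
containers = grpc_client.container.list(endpoint=cluster.default_rpc_endpoint)
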
chunks: + chunks_node_key.extend(chunk.confirmed_nodes) + for node_info in netmap.copy(): + if node_info.node_id in chunks_node_key and node_info in netmap: + netmap.remove(node_info) + result = [] + for node_info in netmap: + for cluster_node in cluster.cluster_nodes: + if node_info.node == cluster_node.get_interface(Interfaces.MGMT): + result.append(cluster_node) + return result + + @reporter.step("Search node with chunk {chunk}") + def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: + netmap = parse_netmap_output(self.cli.netmap.snapshot(cluster.default_rpc_endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout) + for node_info in netmap: + if node_info.node_id in chunk.confirmed_nodes: + for cluster_node in cluster.cluster_nodes: + if cluster_node.get_interface(Interfaces.MGMT) == node_info.node: + return (cluster_node, node_info) + + @wait_for_success(300, 5, fail_testcase=None) + @reporter.step("Search shard with chunk {chunk}") + def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: + oid_path = f"{chunk.object_id[0]}/{chunk.object_id[1]}/{chunk.object_id[2]}/{chunk.object_id[3]}" + node_shell = node.storage_node.host.get_shell() + shards_watcher = ShardsWatcher(node) + + with reporter.step("Search object file"): + for shard_id, shard_info in shards_watcher.shards_snapshots[-1].items(): + check_dir = node_shell.exec(f" [ -d {shard_info['blobstor'][1]['path']}/{oid_path} ] && echo 1 || echo 0").stdout + if "1" in check_dir.strip(): + return shard_id + + @reporter.step("Get all chunks") + def get_all( + self, + rpc_endpoint: str, + cid: str, + oid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = True, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> list[Chunk]: + object_nodes = self.cli.object.nodes( + rpc_endpoint=rpc_endpoint, + cid=cid, + address=address, + bearer=bearer, + generate_key=generate_key, + oid=oid, + trace=trace, + root=root, + verify_presence_all=verify_presence_all, + json=json, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0]) + + @reporter.step("Get last parity chunk") + def get_parity( + self, + rpc_endpoint: str, + cid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = True, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + object_nodes = self.cli.object.nodes( + rpc_endpoint=rpc_endpoint, + cid=cid, + address=address, + bearer=bearer, + generate_key=generate_key, + oid=oid, + trace=trace, + root=root, + verify_presence_all=verify_presence_all, + json=json, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[-1] + + @reporter.step("Get first data chunk") + def get_first_data( + self, + rpc_endpoint: str, + cid: str, + oid: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = True, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = 
CLI_DEFAULT_TIMEOUT, + ) -> Chunk: + object_nodes = self.cli.object.nodes( + rpc_endpoint=rpc_endpoint, + cid=cid, + address=address, + bearer=bearer, + generate_key=generate_key, + oid=oid, + trace=trace, + root=root, + verify_presence_all=verify_presence_all, + json=json, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0] + + def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]: + parse_result = json.loads(object_nodes) + if parse_result.get("errors"): + raise RuntimeError(", ".join(parse_result["errors"])) + return [Chunk(**chunk) for chunk in parse_result["data_objects"]] diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py new file mode 100644 index 00000000..75af00c3 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py @@ -0,0 +1,327 @@ +import json +import logging +import re +from typing import List, Optional, Union + +from frostfs_testlib import reporter +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.clients.s3 import BucketContainerResolver +from frostfs_testlib.plugins import load_plugin +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.grpc_operations import interfaces +from frostfs_testlib.utils import json_utils + +logger = logging.getLogger("NeoLogger") + + +class ContainerOperations(interfaces.ContainerInterface): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + + @reporter.step("Create Container") + def create( + self, + endpoint: str, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, + address: Optional[str] = None, + attributes: Optional[dict] = None, + basic_acl: Optional[str] = None, + await_mode: bool = False, + disable_timestamp: bool = False, + force: bool = False, + trace: bool = False, + name: Optional[str] = None, + nonce: Optional[str] = None, + policy: Optional[str] = None, + session: Optional[str] = None, + subnet: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + A wrapper for `frostfs-cli container create` call. + + Args: + wallet (WalletInfo): a wallet on whose behalf a container is created + rule (optional, str): placement rule for container + basic_acl (optional, str): an ACL for container, will be + appended to `--basic-acl` key + attributes (optional, dict): container attributes , will be + appended to `--attributes` key + session_token (optional, str): a path to session token file + session_wallet(optional, str): a path to the wallet which signed + the session token; this parameter makes sense + when paired with `session_token` + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + options (optional, dict): any other options to pass to the call + name (optional, str): container name attribute + await_mode (bool): block execution until container is persisted + wait_for_creation (): Wait for container shows in container list + timeout: Timeout for the operation. 
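
# Sketch of a payload that satisfies _parse_object_nodes() above; the ids are invented
# and the keys simply mirror Chunk's constructor, not a captured frostfs-cli output.
import json

from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk

raw = json.dumps(
    {
        "data_objects": [
            {
                "object_id": "8N4wXk...",
                "required_nodes": ["02a1..."],
                "confirmed_nodes": ["02a1..."],
                "ec_parent_object_id": "5ZqQvT...",
                "ec_index": 0,
            }
        ],
        "errors": [],
    }
)
parsed = json.loads(raw)
chunks = [Chunk(**chunk) for chunk in parsed["data_objects"]]  # a non-empty "errors" list would raise RuntimeError instead
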
+ + Returns: + (str): CID of the created container + """ + result = self.cli.container.create( + rpc_endpoint=endpoint, + policy=policy, + nns_zone=nns_zone, + nns_name=nns_name, + address=address, + attributes=attributes, + basic_acl=basic_acl, + await_mode=await_mode, + disable_timestamp=disable_timestamp, + force=force, + trace=trace, + name=name, + nonce=nonce, + session=session, + subnet=subnet, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + + cid = self._parse_cid(result.stdout) + + logger.info("Container created; waiting until it is persisted in the sidechain") + + return cid + + @reporter.step("List Containers") + def list( + self, + endpoint: str, + name: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + owner: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + **params, + ) -> List[str]: + """ + A wrapper for `frostfs-cli container list` call. It returns all the + available containers for the given wallet. + Args: + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + timeout: Timeout for the operation. + Returns: + (list): list of containers + """ + result = self.cli.container.list( + rpc_endpoint=endpoint, + name=name, + address=address, + generate_key=generate_key, + owner=owner, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + **params, + ) + return result.stdout.split() + + @reporter.step("List Objects in container") + def list_objects( + self, + endpoint: str, + cid: str, + bearer: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[str]: + """ + A wrapper for `frostfs-cli container list-objects` call. It returns all the + available objects in container. + Args: + container_id: cid of container + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + timeout: Timeout for the operation. 
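For reference, a minimal usage sketch of the ContainerOperations wrapper defined above, assuming the caller already holds a configured FrostfsCli instance; the endpoint, container name, and placement policy strings are illustrative and not part of this change:

from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.storage.grpc_operations.implementations.container import ContainerOperations


def create_and_verify_container(cli: FrostfsCli, endpoint: str) -> str:
    containers = ContainerOperations(cli)
    cid = containers.create(
        endpoint=endpoint,
        policy="REP 2 IN X CBF 1 SELECT 2 FROM * AS X",  # illustrative placement policy
        name="editor-example",
        await_mode=True,
    )
    # list() splits stdout on whitespace, so the new CID should show up as one of the tokens
    assert cid in containers.list(endpoint=endpoint)
    return cid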
+ Returns: + (list): list of containers + """ + result = self.cli.container.list_objects( + rpc_endpoint=endpoint, + cid=cid, + bearer=bearer, + wallet=wallet, + address=address, + generate_key=generate_key, + trace=trace, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + logger.info(f"Container objects: \n{result}") + return result.stdout.split() + + @reporter.step("Delete container") + def delete( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + await_mode: bool = False, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + force: bool = False, + trace: bool = False, + ): + return self.cli.container.delete( + rpc_endpoint=endpoint, + cid=cid, + address=address, + await_mode=await_mode, + session=session, + ttl=ttl, + xhdr=xhdr, + force=force, + trace=trace, + ).stdout + + @reporter.step("Get container") + def get( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + to: Optional[str] = None, + json_mode: bool = True, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> Union[dict, str]: + result = self.cli.container.get( + rpc_endpoint=endpoint, + cid=cid, + address=address, + generate_key=generate_key, + await_mode=await_mode, + to=to, + json_mode=json_mode, + trace=trace, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + container_info = json.loads(result.stdout) + attributes = dict() + for attr in container_info["attributes"]: + attributes[attr["key"]] = attr["value"] + container_info["attributes"] = attributes + container_info["ownerID"] = json_utils.json_reencode(container_info["ownerID"]["value"]) + return container_info + + @reporter.step("Get eacl container") + def get_eacl( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + json_mode: bool = True, + trace: bool = False, + to: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ): + return self.cli.container.get_eacl( + rpc_endpoint=endpoint, + cid=cid, + address=address, + generate_key=generate_key, + await_mode=await_mode, + to=to, + session=session, + ttl=ttl, + xhdr=xhdr, + timeout=CLI_DEFAULT_TIMEOUT, + ).stdout + + @reporter.step("Get nodes container") + def nodes( + self, + endpoint: str, + cid: str, + cluster: Cluster, + address: Optional[str] = None, + ttl: Optional[int] = None, + from_file: Optional[str] = None, + trace: bool = False, + short: Optional[bool] = True, + xhdr: Optional[dict] = None, + generate_key: Optional[bool] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[ClusterNode]: + result = self.cli.container.search_node( + rpc_endpoint=endpoint, + cid=cid, + address=address, + ttl=ttl, + from_file=from_file, + trace=trace, + short=short, + xhdr=xhdr, + generate_key=generate_key, + timeout=timeout, + ).stdout + + pattern = r"[0-9]+(?:\.[0-9]+){3}" + nodes_ip = list(set(re.findall(pattern, result))) + + with reporter.step(f"nodes ips = {nodes_ip}"): + nodes_list = cluster.get_nodes_by_ip(nodes_ip) + + with reporter.step(f"Return nodes - {nodes_list}"): + return nodes_list + + @reporter.step("Resolve container by name") + def resolve_container_by_name(name: str, node: ClusterNode): + resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", 
node.host.config.product) + resolver: BucketContainerResolver = resolver_cls() + return resolver.resolve(node, name) + + def _parse_cid(self, output: str) -> str: + """ + Parses container ID from a given CLI output. The input string we expect: + container ID: 2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN + awaiting... + container has been persisted on sidechain + We want to take 'container ID' value from the string. + + Args: + output (str): CLI output to parse + + Returns: + (str): extracted CID + """ + try: + # taking first line from command's output + first_line = output.split("\n")[0] + except Exception: + first_line = "" + logger.error(f"Got empty output: {output}") + splitted = first_line.split(": ") + if len(splitted) != 2: + raise ValueError(f"no CID was parsed from command output: \t{first_line}") + return splitted[1] diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py new file mode 100644 index 00000000..76ee69a3 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py @@ -0,0 +1,171 @@ +import json as module_json +from typing import List, Optional + +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.cli.netmap_parser import NetmapParser +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeInfo, NodeNetInfo, NodeNetmapInfo + +from .. import interfaces + + +class NetmapOperations(interfaces.NetmapInterface): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + + def epoch( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> int: + """ + Get current epoch number. + """ + output = ( + self.cli.netmap.epoch( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return int(output) + + def netinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> NodeNetInfo: + """ + Get target node info. + """ + output = ( + self.cli.netmap.netinfo( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.netinfo(output) + + def nodeinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + json: bool = True, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> NodeInfo: + """ + Get target node info. 
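As a usage sketch for the NetmapOperations wrapper above; the endpoint value and the surrounding fixture plumbing are assumptions:

from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.storage.grpc_operations.implementations.netmap import NetmapOperations


def report_epoch_and_netinfo(cli: FrostfsCli, endpoint: str) -> None:
    netmap = NetmapOperations(cli)
    # Both wrappers strip everything after "Trace ID" from stdout before parsing,
    # so keeping trace=True does not break int() or NetmapParser parsing.
    epoch = netmap.epoch(rpc_endpoint=endpoint)
    net_info = netmap.netinfo(rpc_endpoint=endpoint)
    print(f"epoch={epoch}, netinfo={net_info}")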
+ """ + output = ( + self.cli.netmap.nodeinfo( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + json=json, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.node_info(module_json.loads(output)) + + def snapshot( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[NodeNetmapInfo]: + """ + Get target node info. + """ + output = ( + self.cli.netmap.snapshot( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.snapshot_all_nodes(output) + + def snapshot_one_node( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[NodeNetmapInfo]: + """ + Get target one node info. + """ + output = ( + self.cli.netmap.snapshot( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.snapshot_one_node(output, rpc_endpoint) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py new file mode 100644 index 00000000..be8a4701 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py @@ -0,0 +1,708 @@ +import json +import logging +import os +import re +import uuid +from typing import Any, Optional + +from frostfs_testlib import reporter, utils +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.resources.common import ASSETS_DIR +from frostfs_testlib.shell.interfaces import CommandResult +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces +from frostfs_testlib.storage.grpc_operations import interfaces +from frostfs_testlib.storage.grpc_operations.implementations.chunks import ChunksOperations +from frostfs_testlib.testing.test_control import wait_for_success +from frostfs_testlib.utils import cli_utils, file_utils + +logger = logging.getLogger("NeoLogger") + + +class ObjectOperations(interfaces.ObjectInterface): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + self.chunks: interfaces.ChunksInterface = ChunksOperations(self.cli) + + @reporter.step("Delete object") + def delete( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + DELETE an Object. 
+ + Args: + cid: ID of Container where we get the Object from + oid: ID of Object we are going to delete + bearer: path to Bearer Token file, appends to `--bearer` key + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + (str): Tombstone ID + """ + result = self.cli.object.delete( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + id_str = result.stdout.split("\n")[1] + tombstone = id_str.split(":")[1] + return tombstone.strip() + + @reporter.step("Get object") + def get( + self, + cid: str, + oid: str, + endpoint: str, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> file_utils.TestFile: + """ + GET from FrostFS. + + Args: + cid (str): ID of Container where we get the Object from + oid (str): Object ID + bearer: path to Bearer Token file, appends to `--bearer` key + write_object: path to downloaded file, appends to `--file` key + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + no_progress(optional, bool): do not show progress bar + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + (str): path to downloaded file + """ + if not write_object: + write_object = str(uuid.uuid4()) + test_file = file_utils.TestFile(os.path.join(ASSETS_DIR, write_object)) + + self.cli.object.get( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + file=test_file, + bearer=bearer, + no_progress=no_progress, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + return test_file + + @reporter.step("Get object from random node") + def get_from_random_node( + self, + cid: str, + oid: str, + cluster: Cluster, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + GET from FrostFS random storage node + + Args: + cid: ID of Container where we get the Object from + oid: Object ID + cluster: cluster object + bearer (optional, str): path to Bearer Token file, appends to `--bearer` key + write_object (optional, str): path to downloaded file, appends to `--file` key + no_progress(optional, bool): do not show progress bar + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + timeout: Timeout for the operation. 
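The tombstone extraction in delete() above assumes the CLI prints the ID on the second stdout line in a key: value form; a small sketch of that parsing step, with purely hypothetical CLI wording:

# Hypothetical stdout; the real wording comes from frostfs-cli and is not guaranteed here.
stdout_example = "Object removed successfully.\n  ID: ExampleTombstoneID\n"
tombstone_id = stdout_example.split("\n")[1].split(":")[1].strip()
assert tombstone_id == "ExampleTombstoneID"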
+ Returns: + (str): path to downloaded file + """ + endpoint = cluster.get_random_storage_rpc_endpoint() + return self.get( + cid, + oid, + endpoint, + bearer, + write_object, + xhdr, + no_progress, + session, + timeout, + ) + + @reporter.step("Get hash object") + def hash( + self, + rpc_endpoint: str, + cid: str, + oid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + range: Optional[str] = None, + salt: Optional[str] = None, + ttl: Optional[int] = None, + session: Optional[str] = None, + hash_type: Optional[str] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + Get object hash. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + generate_key: Generate new private key. + oid: Object ID. + range: Range to take hash from in the form offset1:length1,... + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + salt: Salt in hex format. + ttl: TTL value in request meta header (default 2). + session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session. + hash_type: Hash type. Either 'sha256' or 'tz' (default "sha256"). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). + + Returns: + Command's result. + """ + result = self.cli.object.hash( + rpc_endpoint=rpc_endpoint, + cid=cid, + oid=oid, + address=address, + bearer=bearer, + generate_key=generate_key, + range=range, + salt=salt, + ttl=ttl, + xhdr=xhdr, + session=session, + hash_type=hash_type, + timeout=timeout, + ) + + if range: + # Cut off the range and return only hash + return result.stdout.split(":")[1].strip() + + return result.stdout + + @reporter.step("Head object") + def head( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + json_output: bool = True, + is_raw: bool = False, + is_direct: bool = False, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> CommandResult | Any: + """ + HEAD an Object. + + Args: + cid (str): ID of Container where we get the Object from + oid (str): ObjectID to HEAD + bearer (optional, str): path to Bearer Token file, appends to `--bearer` key + endpoint(optional, str): FrostFS endpoint to send request to + json_output(optional, bool): return response in JSON format or not; this flag + turns into `--json` key + is_raw(optional, bool): send "raw" request or not; this flag + turns into `--raw` key + is_direct(optional, bool): send request directly to the node or not; this flag + turns into `--ttl 1` key + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + depending on the `json_output` parameter value, the function returns + (dict): HEAD response in JSON format + or + (str): HEAD response as a plain text + """ + result = self.cli.object.head( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + json_mode=json_output, + raw=is_raw, + ttl=1 if is_direct else None, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + if not json_output: + return result + + try: + decoded = json.loads(result.stdout) + except Exception as exc: + # If we failed to parse output as JSON, the cause might be + # the plain text string in the beginning of the output. 
+ # Here we cut off first string and try to parse again. + logger.info(f"failed to parse output: {exc}") + logger.info("parsing output in another way") + fst_line_idx = result.stdout.find("\n") + decoded = json.loads(result.stdout[fst_line_idx:]) + + # if response + if "chunks" in decoded.keys(): + logger.info("decoding ec chunks") + return decoded["chunks"] + + # If response is Complex Object header, it has `splitId` key + if "splitId" in decoded.keys(): + logger.info("decoding split header") + return utils.json_utils.decode_split_header(decoded) + + # If response is Last or Linking Object header, + # it has `header` dictionary and non-null `split` dictionary + if "split" in decoded["header"].keys(): + if decoded["header"]["split"]: + logger.info("decoding linking object") + return utils.json_utils.decode_linking_object(decoded) + + if decoded["header"]["objectType"] == "STORAGE_GROUP": + logger.info("decoding storage group") + return utils.json_utils.decode_storage_group(decoded) + + if decoded["header"]["objectType"] == "TOMBSTONE": + logger.info("decoding tombstone") + return utils.json_utils.decode_tombstone(decoded) + + logger.info("decoding simple header") + return utils.json_utils.decode_simple_header(decoded) + + @reporter.step("Lock Object") + def lock( + self, + cid: str, + oid: str, + endpoint: str, + lifetime: Optional[int] = None, + expire_at: Optional[int] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + Locks object in container. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + oid: Object ID. + lifetime: Lock lifetime. + expire_at: Lock expiration epoch. + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + session: Path to a JSON-encoded container session token. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Dict with request X-Headers. + timeout: Timeout for the operation. + + Returns: + Lock object ID + """ + result = self.cli.object.lock( + rpc_endpoint=endpoint, + lifetime=lifetime, + expire_at=expire_at, + address=address, + cid=cid, + oid=oid, + bearer=bearer, + xhdr=xhdr, + session=session, + ttl=ttl, + timeout=timeout, + ) + + # Splitting CLI output to separate lines and taking the penultimate line + id_str = result.stdout.strip().split("\n")[0] + oid = id_str.split(":")[1] + return oid.strip() + + @reporter.step("Put object") + def put( + self, + path: str, + cid: str, + endpoint: str, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + PUT of given file. 
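Because head() above returns differently shaped data depending on the header type, a short usage sketch may help; cid, oid, and endpoint are assumed to come from the calling test:

from typing import Any

from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.storage.grpc_operations.implementations.object import ObjectOperations


def inspect_header(cli: FrostfsCli, endpoint: str, cid: str, oid: str) -> Any:
    objects = ObjectOperations(cli)
    objects.head(cid=cid, oid=oid, endpoint=endpoint, json_output=False)  # raw CommandResult, untouched
    # With json_output=True the decoded value depends on the header: EC objects yield the
    # "chunks" value, split/tombstone/storage-group headers go through the matching
    # json_utils decoder, and plain objects through decode_simple_header()
    decoded = objects.head(cid=cid, oid=oid, endpoint=endpoint)
    return decoded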
+ + Args: + path: path to file to be PUT + cid: ID of Container where we get the Object from + bearer: path to Bearer Token file, appends to `--bearer` key + copies_number: Number of copies of the object to store within the RPC call + attributes: User attributes in form of Key1=Value1,Key2=Value2 + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + no_progress: do not show progress bar + expire_at: Last epoch in the life of the object + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + (str): ID of uploaded Object + """ + result = self.cli.object.put( + rpc_endpoint=endpoint, + file=path, + cid=cid, + attributes=attributes, + bearer=bearer, + copies_number=copies_number, + expire_at=expire_at, + no_progress=no_progress, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + # Splitting CLI output to separate lines and taking the penultimate line + id_str = result.stdout.strip().split("\n")[-2] + oid = id_str.split(":")[1] + return oid.strip() + + @reporter.step("Patch object") + def patch( + self, + cid: str, + oid: str, + endpoint: str, + ranges: list[str] = None, + payloads: list[str] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + trace: bool = False, + ) -> str: + """ + PATCH an object. + + Args: + cid: ID of Container where we get the Object from + oid: Object ID + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + ranges: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2] + payloads: An array of file paths to be applied in each range + new_attrs: Attributes to be changed in the format "key1=value1,key2=value2" + replace_attrs: Replace all attributes completely with new ones specified in new_attrs + bearer: Path to Bearer Token file, appends to `--bearer` key + xhdr: Request X-Headers in form of Key=Value + session: Path to a JSON-encoded container session token + timeout: Timeout for the operation + trace: Generate trace ID and print it + Returns: + (str): ID of patched Object + """ + result = self.cli.object.patch( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + range=ranges, + payload=payloads, + new_attrs=new_attrs, + replace_attrs=replace_attrs, + bearer=bearer, + xhdr=xhdr, + session=session, + timeout=timeout, + trace=trace, + ) + return result.stdout.split(":")[1].strip() + + @reporter.step("Put object to random node") + def put_to_random_node( + self, + path: str, + cid: str, + cluster: Cluster, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + PUT of given file to a random storage node. 
+ + Args: + path: path to file to be PUT + cid: ID of Container where we get the Object from + cluster: cluster under test + bearer: path to Bearer Token file, appends to `--bearer` key + copies_number: Number of copies of the object to store within the RPC call + attributes: User attributes in form of Key1=Value1,Key2=Value2 + cluster: cluster under test + no_progress: do not show progress bar + expire_at: Last epoch in the life of the object + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + ID of uploaded Object + """ + endpoint = cluster.get_random_storage_rpc_endpoint() + return self.put( + path, + cid, + endpoint, + bearer, + copies_number, + attributes, + xhdr, + expire_at, + no_progress, + session, + timeout=timeout, + ) + + @reporter.step("Get Range") + def range( + self, + cid: str, + oid: str, + range_cut: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> tuple[file_utils.TestFile, bytes]: + """ + GETRANGE an Object. + + Args: + wallet: wallet on whose behalf GETRANGE is done + cid: ID of Container where we get the Object from + oid: ID of Object we are going to request + range_cut: range to take data from in the form offset:length + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + bearer: path to Bearer Token file, appends to `--bearer` key + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + (str, bytes) - path to the file with range content and content of this file as bytes + """ + test_file = file_utils.TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4()))) + + self.cli.object.range( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + range=range_cut, + file=test_file, + bearer=bearer, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + with open(test_file, "rb") as file: + content = file.read() + return test_file, content + + @reporter.step("Search object") + def search( + self, + cid: str, + endpoint: str, + bearer: str = "", + oid: Optional[str] = None, + filters: Optional[dict] = None, + expected_objects_list: Optional[list] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + phy: bool = False, + root: bool = False, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + ttl: Optional[int] = None, + ) -> list: + """ + SEARCH an Object. + + Args: + wallet: wallet on whose behalf SEARCH is done + cid: ID of Container where we get the Object from + shell: executor for cli command + bearer: path to Bearer Token file, appends to `--bearer` key + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + filters: key=value pairs to filter Objects + expected_objects_list: a list of ObjectIDs to compare found Objects with + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + phy: Search physically stored objects. + root: Search for user objects. + timeout: Timeout for the operation. 
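A put/range round trip, sketched under the assumption that cluster and cid come from the usual fixtures:

from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.grpc_operations.implementations.object import ObjectOperations
from frostfs_testlib.utils import file_utils


def put_and_read_range(cli: FrostfsCli, cluster: Cluster, cid: str) -> None:
    objects = ObjectOperations(cli)
    test_file = file_utils.generate_file(1024)
    oid = objects.put_to_random_node(test_file, cid, cluster)
    # range() downloads the cut into a temporary TestFile and also returns its bytes
    _, payload = objects.range(cid, oid, "0:16", cluster.default_rpc_endpoint)
    assert len(payload) == 16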
+ + Returns: + list of found ObjectIDs + """ + result = self.cli.object.search( + rpc_endpoint=endpoint, + cid=cid, + bearer=bearer, + oid=oid, + xhdr=xhdr, + filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None, + session=session, + phy=phy, + root=root, + address=address, + generate_key=generate_key, + ttl=ttl, + timeout=timeout, + ) + + found_objects = re.findall(r"(\w{43,44})", result.stdout) + + if expected_objects_list: + if sorted(found_objects) == sorted(expected_objects_list): + logger.info(f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'") + else: + logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'") + + return found_objects + + @wait_for_success() + @reporter.step("Search object nodes") + def nodes( + self, + cluster: Cluster, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> list[ClusterNode]: + endpoint = alive_node.storage_node.get_rpc_endpoint() + + response = self.cli.object.nodes( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + ttl=1 if is_direct else None, + json=True, + xhdr=xhdr, + timeout=timeout, + verify_presence_all=verify_presence_all, + ) + + response_json = json.loads(response.stdout) + # Currently, the command will show expected and confirmed nodes. + # And we (currently) count only nodes which are both expected and confirmed + object_nodes_id = { + required_node + for data_object in response_json["data_objects"] + for required_node in data_object["required_nodes"] + if required_node in data_object["confirmed_nodes"] + } + + netmap_nodes_list = cli_utils.parse_netmap_output( + self.cli.netmap.snapshot( + rpc_endpoint=endpoint, + ).stdout + ) + netmap_nodes = [ + netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id + ] + + object_nodes = [ + cluster_node + for netmap_node in netmap_nodes + for cluster_node in cluster.cluster_nodes + if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT) + ] + + return object_nodes + + @reporter.step("Search parts of object") + def parts( + self, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> list[str]: + endpoint = alive_node.storage_node.get_rpc_endpoint() + response = self.cli.object.nodes( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + ttl=1 if is_direct else None, + json=True, + xhdr=xhdr, + timeout=timeout, + verify_presence_all=verify_presence_all, + ) + response_json = json.loads(response.stdout) + return [data_object["object_id"] for data_object in response_json["data_objects"]] diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py new file mode 100644 index 00000000..17b3e9c0 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py @@ -0,0 +1,4 @@ +from .chunks import ChunksInterface +from .container import ContainerInterface +from .netmap import NetmapInterface +from .object import ObjectInterface diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py 
b/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py new file mode 100644 index 00000000..986b938e --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py @@ -0,0 +1,79 @@ +from abc import ABC, abstractmethod +from typing import Optional + +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo + + +class ChunksInterface(ABC): + @abstractmethod + def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: + pass + + @abstractmethod + def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: + pass + + @abstractmethod + def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: + pass + + @abstractmethod + def get_all( + self, + rpc_endpoint: str, + cid: str, + oid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> list[Chunk]: + pass + + @abstractmethod + def get_parity( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + pass + + @abstractmethod + def get_first_data( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py new file mode 100644 index 00000000..d5e3eeb4 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py @@ -0,0 +1,125 @@ +from abc import ABC, abstractmethod +from typing import List, Optional + +from frostfs_testlib.storage.cluster import Cluster, ClusterNode + + +class ContainerInterface(ABC): + @abstractmethod + def create( + self, + endpoint: str, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, + address: Optional[str] = None, + attributes: Optional[dict] = None, + basic_acl: Optional[str] = None, + await_mode: bool = False, + disable_timestamp: bool = False, + force: bool = False, + trace: bool = False, + name: Optional[str] = None, + nonce: Optional[str] = None, + policy: Optional[str] = None, + session: Optional[str] = None, + subnet: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> str: + """ + Create a new container and register it in the FrostFS. + It will be stored in the sidechain when the Inner Ring accepts it. 
+ """ + raise NotImplementedError("No implemethed method create") + + @abstractmethod + def delete( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + await_mode: bool = False, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + force: bool = False, + trace: bool = False, + ) -> List[str]: + """ + Delete an existing container. + Only the owner of the container has permission to remove the container. + """ + raise NotImplementedError("No implemethed method delete") + + @abstractmethod + def get( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + to: Optional[str] = None, + json_mode: bool = True, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get container field info.""" + raise NotImplementedError("No implemethed method get") + + @abstractmethod + def get_eacl( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + json_mode: bool = True, + trace: bool = False, + to: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get extended ACL table of container.""" + raise NotImplementedError("No implemethed method get-eacl") + + @abstractmethod + def list( + self, + endpoint: str, + name: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + owner: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + **params, + ) -> List[str]: + """List all created containers.""" + raise NotImplementedError("No implemethed method list") + + @abstractmethod + def nodes( + self, + endpoint: str, + cid: str, + cluster: Cluster, + address: Optional[str] = None, + ttl: Optional[int] = None, + from_file: Optional[str] = None, + trace: bool = False, + short: Optional[bool] = True, + xhdr: Optional[dict] = None, + generate_key: Optional[bool] = None, + timeout: Optional[str] = None, + ) -> List[ClusterNode]: + """Show the nodes participating in the container in the current epoch.""" + raise NotImplementedError("No implemethed method nodes") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py new file mode 100644 index 00000000..3fdc98ab --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py @@ -0,0 +1,89 @@ +from abc import ABC, abstractmethod +from typing import List, Optional + +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeInfo, NodeNetInfo, NodeNetmapInfo + + +class NetmapInterface(ABC): + @abstractmethod + def epoch( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = False, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> int: + """ + Get current epoch number. 
+ """ + raise NotImplementedError("No implemethed method epoch") + + @abstractmethod + def netinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> NodeNetInfo: + """ + Get target node info. + """ + raise NotImplementedError("No implemethed method netinfo") + + @abstractmethod + def nodeinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> NodeInfo: + """ + Get target node info. + """ + raise NotImplementedError("No implemethed method nodeinfo") + + @abstractmethod + def snapshot( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[NodeNetmapInfo]: + """ + Get target node info. + """ + raise NotImplementedError("No implemethed method snapshot") + + @abstractmethod + def snapshot_one_node( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[NodeNetmapInfo]: + """ + Get target one node info. + """ + raise NotImplementedError("No implemethed method snapshot") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py new file mode 100644 index 00000000..550c461c --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py @@ -0,0 +1,223 @@ +from abc import ABC, abstractmethod +from typing import Any, List, Optional + +from frostfs_testlib.shell.interfaces import CommandResult +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.utils import file_utils + +from .chunks import ChunksInterface + + +class ObjectInterface(ABC): + def __init__(self) -> None: + self.chunks: ChunksInterface + + @abstractmethod + def delete( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def get( + self, + cid: str, + oid: str, + endpoint: str, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> file_utils.TestFile: + pass + + @abstractmethod + def get_from_random_node( + self, + cid: str, + oid: str, + cluster: Cluster, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def hash( + self, + endpoint: str, + cid: str, + oid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + range: Optional[str] = None, + salt: Optional[str] = None, + ttl: Optional[int] = None, + session: Optional[str] = None, + hash_type: Optional[str] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> 
str: + pass + + @abstractmethod + def head( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + json_output: bool = True, + is_raw: bool = False, + is_direct: bool = False, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult | Any: + pass + + @abstractmethod + def lock( + self, + cid: str, + oid: str, + endpoint: str, + lifetime: Optional[int] = None, + expire_at: Optional[int] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def put( + self, + path: str, + cid: str, + endpoint: str, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def patch( + self, + cid: str, + oid: str, + endpoint: str, + ranges: Optional[list[str]] = None, + payloads: Optional[list[str]] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + bearer: Optional[str] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + trace: bool = False, + ) -> str: + pass + + @abstractmethod + def put_to_random_node( + self, + path: str, + cid: str, + cluster: Cluster, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def range( + self, + cid: str, + oid: str, + range_cut: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> tuple[file_utils.TestFile, bytes]: + pass + + @abstractmethod + def search( + self, + cid: str, + endpoint: str, + bearer: str = "", + oid: Optional[str] = None, + filters: Optional[dict] = None, + expected_objects_list: Optional[list] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + phy: bool = False, + root: bool = False, + timeout: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + ttl: Optional[int] = None, + ) -> List: + pass + + @abstractmethod + def nodes( + self, + cluster: Cluster, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = None, + ) -> List[ClusterNode]: + pass + + @abstractmethod + def parts( + self, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = None, + ) -> List[str]: + pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py b/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py new file mode 100644 index 00000000..6574012b --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py @@ -0,0 +1,10 @@ +from abc import ABC + +from . 
import interfaces + + +class GrpcClientWrapper(ABC): + def __init__(self) -> None: + self.object: interfaces.ObjectInterface + self.container: interfaces.ContainerInterface + self.netmap: interfaces.NetmapInterface diff --git a/src/frostfs_testlib/testing/cluster_test_base.py b/src/frostfs_testlib/testing/cluster_test_base.py index 49c6afd3..50c8eb6c 100644 --- a/src/frostfs_testlib/testing/cluster_test_base.py +++ b/src/frostfs_testlib/testing/cluster_test_base.py @@ -25,14 +25,10 @@ class ClusterTestBase: for _ in range(epochs_to_tick): self.tick_epoch(alive_node, wait_block) - def tick_epoch( - self, - alive_node: Optional[StorageNode] = None, - wait_block: int = None, - ): - epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node) + def tick_epoch(self, alive_node: Optional[StorageNode] = None, wait_block: int = None, delta: Optional[int] = None): + epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node, delta=delta) if wait_block: - time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * wait_block) + self.wait_for_blocks(wait_block) def wait_for_epochs_align(self): epoch.wait_for_epochs_align(self.shell, self.cluster) @@ -42,3 +38,6 @@ class ClusterTestBase: def ensure_fresh_epoch(self): return epoch.ensure_fresh_epoch(self.shell, self.cluster) + + def wait_for_blocks(self, blocks_count: int = 1): + time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * blocks_count) diff --git a/src/frostfs_testlib/testing/parallel.py b/src/frostfs_testlib/testing/parallel.py index 1c30cece..6c4f6e05 100644 --- a/src/frostfs_testlib/testing/parallel.py +++ b/src/frostfs_testlib/testing/parallel.py @@ -1,7 +1,22 @@ import itertools +import traceback from concurrent.futures import Future, ThreadPoolExecutor +from contextlib import contextmanager from typing import Callable, Collection, Optional, Union +MAX_WORKERS = 50 + + +@contextmanager +def parallel_workers_limit(workers_count: int): + global MAX_WORKERS + original_value = MAX_WORKERS + MAX_WORKERS = workers_count + try: + yield + finally: + MAX_WORKERS = original_value + def parallel( fn: Union[Callable, list[Callable]], @@ -41,7 +56,42 @@ def parallel( # Check for exceptions exceptions = [future.exception() for future in futures if future.exception()] if exceptions: - message = "\n".join([str(e) for e in exceptions]) + # Prettify exception in parallel with all underlying stack traces + # For example, we had 3 RuntimeError exceptions during parallel. 
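A usage sketch for the new parallel_workers_limit() context manager; the restart command and the parallel_items keyword are assumptions based on the surrounding helper code:

from frostfs_testlib.testing.parallel import parallel, parallel_workers_limit


def restart_storage_on(nodes: list) -> None:
    def restart(node):
        # hypothetical command; any per-node callable works here
        node.host.get_shell().exec("sudo systemctl restart frostfs-storage")

    # Cap the thread pool at 10 workers for this block, then restore the module default
    with parallel_workers_limit(10):
        parallel(restart, parallel_items=nodes)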
This format will give us something like + # + # RuntimeError: The following exceptions occured during parallel run: + # 1) Exception one text + # 2) Exception two text + # 3) Exception three text + # TRACES: + # ==== 1 ==== + # Traceback (most recent call last): + # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run + # result = self.fn(*self.args, **self.kwargs) + # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service + # raise RuntimeError(f"Exception one text") + # RuntimeError: Exception one text + # + # ==== 2 ==== + # Traceback (most recent call last): + # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run + # result = self.fn(*self.args, **self.kwargs) + # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service + # raise RuntimeError(f"Exception two text") + # RuntimeError: Exception two text + # + # ==== 3 ==== + # Traceback (most recent call last): + # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run + # result = self.fn(*self.args, **self.kwargs) + # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service + # raise RuntimeError(f"Exception three text") + # RuntimeError: Exception three text + short_summary = "\n".join([f"{i}) {str(e)}" for i, e in enumerate(exceptions, 1)]) + stack_traces = "\n".join( + [f"==== {i} ====\n{''.join(traceback.TracebackException.from_exception(e).format())}" for i, e in enumerate(exceptions, 1)] + ) + message = f"{short_summary}\nTRACES:\n{stack_traces}" raise RuntimeError(f"The following exceptions occured during parallel run:\n{message}") return futures @@ -54,7 +104,7 @@ def _run_by_fn_list(fn_list: list[Callable], *args, **kwargs) -> list[Future]: futures: list[Future] = [] - with ThreadPoolExecutor(max_workers=len(fn_list)) as executor: + with ThreadPoolExecutor(max_workers=min(len(fn_list), MAX_WORKERS)) as executor: for fn in fn_list: task_args = _get_args(*args) task_kwargs = _get_kwargs(**kwargs) @@ -67,7 +117,7 @@ def _run_by_fn_list(fn_list: list[Callable], *args, **kwargs) -> list[Future]: def _run_by_items(fn: Callable, parallel_items: Collection, *args, **kwargs) -> list[Future]: futures: list[Future] = [] - with ThreadPoolExecutor(max_workers=len(parallel_items)) as executor: + with ThreadPoolExecutor(max_workers=min(len(parallel_items), MAX_WORKERS)) as executor: for item in parallel_items: task_args = _get_args(*args) task_kwargs = _get_kwargs(**kwargs) diff --git a/src/frostfs_testlib/testing/test_control.py b/src/frostfs_testlib/testing/test_control.py index 4fa63902..bc38208e 100644 --- a/src/frostfs_testlib/testing/test_control.py +++ b/src/frostfs_testlib/testing/test_control.py @@ -1,13 +1,16 @@ import inspect import logging +import os from functools import wraps from time import sleep, time from typing import Any +import yaml from _pytest.outcomes import Failed from pytest import fail from frostfs_testlib import reporter +from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.utils.func_utils import format_by_args logger = logging.getLogger("NeoLogger") @@ -128,6 +131,42 @@ def run_optionally(enabled: bool, mock_value: Any = True): return deco +def cached_fixture(enabled: bool): + """ + Decorator to cache fixtures. + MUST be placed after @pytest.fixture and before @allure decorators. + + Args: + enabled: if true, decorated func will be cached. 
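A minimal sketch of how this cached_fixture decorator is intended to be stacked; the fixture itself and the literal enabled=True flag are illustrative:

import pytest

from frostfs_testlib.testing.test_control import cached_fixture


@pytest.fixture(scope="session")
@cached_fixture(enabled=True)  # normally driven by a config flag rather than a literal
def default_user() -> dict:
    # Expensive setup whose result must be YAML-serializable; yielding fixtures
    # are not supported and raise NotImplementedError
    return {"name": "editor-example-user"}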
+ """ + + def deco(func): + @wraps(func) + def func_impl(*a, **kw): + # TODO: *a and *kw should be parsed to some kind of hashsum and used in filename to prevent cache load from different parameters + cache_file = os.path.join(ASSETS_DIR, f"fixture_cache_{func.__name__}.yml") + + if enabled and os.path.exists(cache_file): + with open(cache_file, "r") as cache_input: + return yaml.load(cache_input, Loader=yaml.Loader) + + result = func(*a, **kw) + + if enabled: + with open(cache_file, "w") as cache_output: + yaml.dump(result, cache_output) + return result + + # TODO: cache yielding fixtures + @wraps(func) + def gen_impl(*a, **kw): + raise NotImplementedError("Not implemented for yielding fixtures") + + return gen_impl if inspect.isgeneratorfunction(func) else func_impl + + return deco + + def wait_for_success( max_wait_time: int = 60, interval: int = 1, diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 41d52abe..87872967 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -9,13 +9,12 @@ import csv import json import logging import re -import subprocess import sys from contextlib import suppress from datetime import datetime from io import StringIO from textwrap import shorten -from typing import Dict, List, TypedDict, Union +from typing import Any, Optional, Union import pexpect @@ -41,7 +40,7 @@ def _run_with_passwd(cmd: str) -> str: return cmd.decode() -def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str = "json") -> str: +def _configure_aws_cli(cmd: str, key_id: str, access_key: str, region: str, out_format: str = "json") -> str: child = pexpect.spawn(cmd) child.delaybeforesend = 1 @@ -52,7 +51,7 @@ def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str = child.sendline(access_key) child.expect("Default region name.*") - child.sendline("") + child.sendline("region") child.expect("Default output format.*") child.sendline(out_format) @@ -69,20 +68,84 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n" f"RC: {return_code}\n" - f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {end_time - start_time}" + f"Start / End / Elapsed\t {start_time} / {end_time} / {end_time - start_time}" ) with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): reporter.attach(command_attachment, "Command execution") -def log_command_execution(cmd: str, output: Union[str, TypedDict]) -> None: +def log_command_execution(cmd: str, output: Union[str, dict], params: Optional[dict] = None, **kwargs) -> None: logger.info(f"{cmd}: {output}") - with suppress(Exception): - json_output = json.dumps(output, indent=4, sort_keys=True) - output = json_output - command_attachment = f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n" - with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): - reporter.attach(command_attachment, "Command execution") + + if not params: + params = {} + + if params.get("Body") and len(params.get("Body")) > 1000: + params["Body"] = "" + + output_params = params + + try: + json_params = json.dumps(params, indent=4, sort_keys=True, default=str) + except TypeError as err: + logger.warning(f"Failed to serialize '{cmd}' request parameters:\n{params}\nException: {err}") + else: + output_params = json_params + + output = json.dumps(output, indent=4, sort_keys=True, default=str) + + command_execution = f"COMMAND: '{cmd}'\n" f"URL: 
{kwargs['endpoint']}\n" f"PARAMS:\n{output_params}\n" f"OUTPUT:\n{output}\n" + aws_command = _convert_request_to_aws_cli_command(cmd, params, **kwargs) + + reporter.attach(command_execution, "Command execution") + reporter.attach(aws_command, "AWS CLI Command") + + +def _convert_request_to_aws_cli_command(command: str, params: dict, **kwargs) -> str: + overriden_names = [_convert_json_name_to_aws_cli(name) for name in kwargs.keys()] + command = command.replace("_", "-") + options = [] + + for name, value in params.items(): + name = _convert_json_name_to_aws_cli(name) + + # To override parameters for AWS CLI + if name in overriden_names: + continue + + if option := _create_option(name, value): + options.append(option) + + for name, value in kwargs.items(): + name = _convert_json_name_to_aws_cli(name) + if option := _create_option(name, value): + options.append(option) + + options = " ".join(options) + api = "s3api" if "s3" in kwargs["endpoint"] else "iam" + return f"aws --no-verify-ssl --no-paginate {api} {command} {options}" + + +def _convert_json_name_to_aws_cli(name: str) -> str: + specific_names = {"CORSConfiguration": "cors-configuration"} + + if aws_cli_name := specific_names.get(name): + return aws_cli_name + return re.sub(r"([a-z])([A-Z])", r"\1 \2", name).lower().replace(" ", "-").replace("_", "-") + + +def _create_option(name: str, value: Any) -> str | None: + if isinstance(value, bool) and value: + return f"--{name}" + + if isinstance(value, dict): + value = json.dumps(value, indent=4, sort_keys=True, default=str) + return f"--{name} '{value}'" + + if value: + return f"--{name} {value}" + + return None def parse_netmap_output(output: str) -> list[NodeNetmapInfo]: diff --git a/src/frostfs_testlib/utils/file_utils.py b/src/frostfs_testlib/utils/file_utils.py index d238106c..8839d7f0 100644 --- a/src/frostfs_testlib/utils/file_utils.py +++ b/src/frostfs_testlib/utils/file_utils.py @@ -6,11 +6,46 @@ from typing import Any, Optional from frostfs_testlib import reporter from frostfs_testlib.resources.common import ASSETS_DIR +from frostfs_testlib.utils import string_utils logger = logging.getLogger("NeoLogger") -def generate_file(size: int) -> str: +class TestFile(os.PathLike): + def __init__(self, path: str): + self.path = path + + def __del__(self): + logger.debug(f"Removing file {self.path}") + if os.path.exists(self.path): + os.remove(self.path) + + def __str__(self): + return self.path + + def __repr__(self): + return self.path + + def __fspath__(self): + return self.path + + +def ensure_directory(path): + directory = os.path.dirname(path) + + if not os.path.exists(directory): + os.makedirs(directory) + + +def ensure_directory_opener(path, flags): + ensure_directory(path) + return os.open(path, flags) + + +# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps +# Use object_size dt in future as argument +@reporter.step("Generate file") +def generate_file(size: int, file_name: Optional[str] = None) -> TestFile: """Generates a binary file with the specified size in bytes. Args: @@ -19,19 +54,26 @@ def generate_file(size: int) -> str: Returns: The path to the generated file. 
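The TestFile helper above deletes its backing file when garbage collected, which is worth illustrating; the path and payload are placeholders:

import os

from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.utils.file_utils import TestFile

# TestFile is os.PathLike, so open() and path helpers accept it directly
test_file = TestFile(os.path.join(ASSETS_DIR, "editor-example-object"))
with open(test_file, "wb") as stream:  # assumes ASSETS_DIR already exists
    stream.write(b"payload")
# The file on disk is removed once the object is collected (immediately under CPython refcounting)
del test_file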
""" - file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4())) - with open(file_path, "wb") as file: + + if file_name is None: + file_name = string_utils.unique_name("object-") + + test_file = TestFile(os.path.join(ASSETS_DIR, file_name)) + with open(test_file, "wb", opener=ensure_directory_opener) as file: file.write(os.urandom(size)) - logger.info(f"File with size {size} bytes has been generated: {file_path}") + logger.info(f"File with size {size} bytes has been generated: {test_file}") - return file_path + return test_file +# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps +# Use object_size dt in future as argument +@reporter.step("Generate file with content") def generate_file_with_content( size: int, - file_path: Optional[str] = None, + file_path: Optional[str | TestFile] = None, content: Optional[str] = None, -) -> str: +) -> TestFile: """Creates a new file with specified content. Args: @@ -48,20 +90,22 @@ def generate_file_with_content( content = os.urandom(size) mode = "wb" + test_file = None if not file_path: - file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) + elif isinstance(file_path, TestFile): + test_file = file_path else: - if not os.path.exists(os.path.dirname(file_path)): - os.makedirs(os.path.dirname(file_path)) + test_file = TestFile(file_path) - with open(file_path, mode) as file: + with open(test_file, mode, opener=ensure_directory_opener) as file: file.write(content) - return file_path + return test_file @reporter.step("Get File Hash") -def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str: +def get_file_hash(file_path: str | TestFile, len: Optional[int] = None, offset: Optional[int] = None) -> str: """Generates hash for the specified file. Args: @@ -88,7 +132,7 @@ def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[in @reporter.step("Concatenation set of files to one file") -def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str: +def concat_files(file_paths: list[str | TestFile], resulting_file_path: Optional[str | TestFile] = None) -> TestFile: """Concatenates several files into a single file. Args: @@ -98,16 +142,24 @@ def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> Returns: Path to the resulting file. """ + + test_file = None if not resulting_file_path: - resulting_file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - with open(resulting_file_path, "wb") as f: + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) + elif isinstance(resulting_file_path, TestFile): + test_file = resulting_file_path + else: + test_file = TestFile(resulting_file_path) + + with open(test_file, "wb", opener=ensure_directory_opener) as f: for file in file_paths: with open(file, "rb") as part_file: f.write(part_file.read()) - return resulting_file_path + return test_file -def split_file(file_path: str, parts: int) -> list[str]: +@reporter.step("Split file to {parts} parts") +def split_file(file_path: str | TestFile, parts: int) -> list[TestFile]: """Splits specified file into several specified number of parts. Each part is saved under name `{original_file}_part_{i}`. 
@@ -129,7 +181,7 @@ def split_file(file_path: str, parts: int) -> list[str]: part_file_paths = [] for content_offset in range(0, content_size + 1, chunk_size): part_file_name = f"{file_path}_part_{part_id}" - part_file_paths.append(part_file_name) + part_file_paths.append(TestFile(part_file_name)) with open(part_file_name, "wb") as out_file: out_file.write(content[content_offset : content_offset + chunk_size]) part_id += 1 @@ -137,9 +189,8 @@ def split_file(file_path: str, parts: int) -> list[str]: return part_file_paths -def get_file_content( - file_path: str, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None -) -> Any: +@reporter.step("Get file content") +def get_file_content(file_path: str | TestFile, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None) -> Any: """Returns content of specified file. Args: diff --git a/src/frostfs_testlib/utils/string_utils.py b/src/frostfs_testlib/utils/string_utils.py index a80192cf..acbca92f 100644 --- a/src/frostfs_testlib/utils/string_utils.py +++ b/src/frostfs_testlib/utils/string_utils.py @@ -1,11 +1,29 @@ +import itertools import random import re import string +from datetime import datetime ONLY_ASCII_LETTERS = string.ascii_letters DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits NON_DIGITS_AND_LETTERS = string.punctuation +# if unique_name is called multiple times within the same microsecond, append 0-4 to the name so it surely unique +FUSE = itertools.cycle(range(5)) + + +def unique_name(prefix: str = "", postfix: str = ""): + """ + Generate unique short name of anything with prefix. + This should be unique in scope of multiple runs + + Args: + prefix: prefix for unique name generation + Returns: + unique name string + """ + return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}{next(FUSE)}{postfix}" + def random_string(length: int = 5, source: str = ONLY_ASCII_LETTERS): """ diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index f1b7e374..06760851 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -1,5 +1,6 @@ import logging import re +from functools import lru_cache from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli @@ -17,14 +18,14 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]: for binary in [NEOGO_EXECUTABLE, FROSTFS_AUTHMATE_EXEC]: out = shell.exec(f"{binary} --version").stdout - versions[binary] = _parse_version(out) + versions[binary] = parse_version(out) frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC) - versions[FROSTFS_CLI_EXEC] = _parse_version(frostfs_cli.version.get().stdout) + versions[FROSTFS_CLI_EXEC] = parse_version(frostfs_cli.version.get().stdout) try: frostfs_adm = FrostfsAdm(shell, FROSTFS_ADM_EXEC) - versions[FROSTFS_ADM_EXEC] = _parse_version(frostfs_adm.version.get().stdout) + versions[FROSTFS_ADM_EXEC] = parse_version(frostfs_adm.version.get().stdout) except RuntimeError: logger.info(f"{FROSTFS_ADM_EXEC} not installed") @@ -36,80 +37,54 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]: return versions +@reporter.step("Collect binaries versions from host") def parallel_binary_verions(host: Host) -> dict[str, str]: versions_by_host = {} - binary_path_by_name = {} # Maps binary name to executable path - for service_config in host.config.services: - exec_path = service_config.attributes.get("exec_path") - requires_check = 
service_config.attributes.get("requires_version_check", "true") - if exec_path: - binary_path_by_name[service_config.name] = { - "exec_path": exec_path, - "check": requires_check.lower() == "true", + binary_path_by_name = { + **{ + svc.name[:-3]: { + "exec_path": svc.attributes.get("exec_path"), + "param": svc.attributes.get("custom_version_parameter", "--version"), } - for cli_config in host.config.clis: - requires_check = cli_config.attributes.get("requires_version_check", "true") - binary_path_by_name[cli_config.name] = { - "exec_path": cli_config.exec_path, - "check": requires_check.lower() == "true", - } + for svc in host.config.services + if svc.attributes.get("exec_path") and svc.attributes.get("requires_version_check", "true") == "true" + }, + **{ + cli.name: {"exec_path": cli.exec_path, "param": cli.attributes.get("custom_version_parameter", "--version")} + for cli in host.config.clis + if cli.attributes.get("requires_version_check", "true") == "true" + }, + } shell = host.get_shell() versions_at_host = {} for binary_name, binary in binary_path_by_name.items(): + binary_path = binary["exec_path"] try: - binary_path = binary["exec_path"] - result = shell.exec(f"{binary_path} --version") - versions_at_host[binary_name] = {"version": _parse_version(result.stdout), "check": binary["check"]} + result = shell.exec(f"{binary_path} {binary['param']}") + version = parse_version(result.stdout) or parse_version(result.stderr) or "Unknown" + versions_at_host[binary_name] = version.strip() except Exception as exc: logger.error(f"Cannot get version for {binary_path} because of\n{exc}") - versions_at_host[binary_name] = {"version": "Unknown", "check": binary["check"]} + versions_at_host[binary_name] = "Unknown" versions_by_host[host.config.address] = versions_at_host return versions_by_host -@reporter.step("Get remote binaries versions") -def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: - versions_by_host = {} - future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts) +@lru_cache +def get_remote_binaries_versions(hosting: Hosting) -> dict[str, dict[str, str]]: + versions_by_host: dict[str, dict[str, str]] = {} + + with reporter.step("Get remote binaries versions"): + future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts) + for future in future_binary_verions: versions_by_host.update(future.result()) - # Consolidate versions across all hosts - cheak_versions = {} - exсeptions = [] - exception = set() - previous_host = None - versions = {} - captured_version = None - for host, binary_versions in versions_by_host.items(): - for name, binary in binary_versions.items(): - version = binary["version"] - if not cheak_versions.get(f"{name[:-2]}", None): - captured_version = cheak_versions.get(f"{name[:-2]}", {}).get(host, {}).get(captured_version) - cheak_versions[f"{name[:-2]}"] = {host: {version: name}} - else: - captured_version = list(cheak_versions.get(f"{name[:-2]}", {}).get(previous_host).keys())[0] - cheak_versions[f"{name[:-2]}"].update({host: {version: name}}) - - if captured_version and captured_version != version: - exception.add(name[:-2]) - - versions[name] = {"version": version, "check": binary["check"]} - previous_host = host - logger.info( - "Remote binaries versions:\n" + "\n".join([f"{key} ver: {value['version']}" for key, value in versions.items()]) - ) - if exception: - for i in exception: - for host in versions_by_host.keys(): - for version, name in cheak_versions.get(i).get(host).items(): - 
exсeptions.append(f"Binary {name} has inconsistent version {version} on host {host}") - exсeptions.append("\n") - return versions, exсeptions + return versions_by_host -def _parse_version(version_output: str) -> str: - version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE) - return version.group(1).strip() if version else version_output +def parse_version(version_output: str) -> str: + version = re.search(r"(?<=version[:=])\s?[\"\']?v?(.+)", version_output, re.IGNORECASE) + return version.group(1).strip("\"'\n\t ") if version else version_output diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py index 19f38322..677aed45 100644 --- a/tests/test_dataclasses.py +++ b/tests/test_dataclasses.py @@ -2,7 +2,7 @@ from typing import Any import pytest -from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper +from frostfs_testlib.clients import AwsCliClient, Boto3ClientWrapper from frostfs_testlib.storage.dataclasses.acl import EACLRole from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.object_size import ObjectSize diff --git a/tests/test_load_config.py b/tests/test_load_config.py index 62339f61..fbeb587d 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -3,20 +3,10 @@ from typing import Any, get_args import pytest -from frostfs_testlib.load.load_config import ( - EndpointSelectionStrategy, - LoadParams, - LoadScenario, - LoadType, - Preset, - ReadFrom, -) +from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType, Preset, ReadFrom from frostfs_testlib.load.runners import DefaultRunner from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME -from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController -from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode -from frostfs_testlib.storage.dataclasses.node_base import NodeBase @dataclass @@ -99,9 +89,7 @@ class TestLoadConfig: def test_load_controller_string_representation(self, load_params: LoadParams): load_params.endpoint_selection_strategy = EndpointSelectionStrategy.ALL load_params.object_size = 512 - background_load_controller = BackgroundLoadController( - "tmp", load_params, "wallet", None, None, DefaultRunner(None) - ) + background_load_controller = BackgroundLoadController("tmp", load_params, None, None, DefaultRunner(None)) expected = "grpc 512 KiB, writers=7, readers=7, deleters=8" assert f"{background_load_controller}" == expected assert repr(background_load_controller) == expected @@ -138,10 +126,12 @@ class TestLoadConfig: "--size '11'", "--acl 'acl'", "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", - "--policy 'container_placement_policy'", + "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", "--ignore-errors", "--sleep '19'", "--local", @@ -170,10 +160,12 @@ class TestLoadConfig: expected_preset_args = [ "--size '11'", "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", - "--policy 'container_placement_policy'", + "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", "--ignore-errors", "--sleep '19'", "--local", @@ -214,7 +206,7 @@ class 
TestLoadConfig: "--out 'pregen_json'", "--workers '7'", "--buckets '13'", - "--location 's3_location'", + "--location 's3_location' --location 's3_location_2'", "--ignore-errors", "--sleep '19'", "--acl 'acl'", @@ -248,7 +240,7 @@ class TestLoadConfig: "--out 'pregen_json'", "--workers '7'", "--buckets '13'", - "--location 's3_location'", + "--location 's3_location' --location 's3_location_2'", "--ignore-errors", "--sleep '19'", "--acl 'acl'", @@ -288,7 +280,7 @@ class TestLoadConfig: "--out 'pregen_json'", "--workers '7'", "--buckets '13'", - "--location 's3_location'", + "--location 's3_location' --location 's3_location_2'", "--ignore-errors", "--sleep '19'", "--acl 'acl'", @@ -326,10 +318,12 @@ class TestLoadConfig: "--no-verify-ssl", "--size '11'", "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", - "--policy 'container_placement_policy'", + "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", "--ignore-errors", "--sleep '19'", "--acl 'acl'", @@ -359,15 +353,18 @@ class TestLoadConfig: expected_preset_args = [ "--size '11'", "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", - "--policy 'container_placement_policy'", + "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", "--ignore-errors", "--sleep '19'", "--acl 'acl'", ] expected_env_vars = { + "CONFIG_DIR": "config_dir", "CONFIG_FILE": "config_file", "DURATION": 9, "WRITE_OBJ_SIZE": 11, @@ -380,12 +377,69 @@ class TestLoadConfig: "DELETERS": 8, "READ_AGE": 8, "STREAMING": 9, + "MAX_TOTAL_SIZE_GB": 17, "PREGEN_JSON": "pregen_json", } self._check_preset_params(load_params, expected_preset_args) self._check_env_vars(load_params, expected_env_vars) + @pytest.mark.parametrize( + "input, value, params", + [ + (["A C ", " B"], ["A C", "B"], [f"--policy 'A C' --policy 'B'"]), + (" A ", ["A"], ["--policy 'A'"]), + (" A , B ", ["A , B"], ["--policy 'A , B'"]), + ([" A", "B "], ["A", "B"], ["--policy 'A' --policy 'B'"]), + (None, None, []), + ], + ) + def test_grpc_list_parsing_formatter(self, input, value, params): + load_params = LoadParams(LoadType.gRPC) + load_params.preset = Preset() + load_params.preset.container_placement_policy = input + assert load_params.preset.container_placement_policy == value + + self._check_preset_params(load_params, params) + + @pytest.mark.parametrize( + "input, value, params", + [ + (["A C ", " B"], ["A C", "B"], [f"--location 'A C' --location 'B'"]), + (" A ", ["A"], ["--location 'A'"]), + (" A , B ", ["A , B"], ["--location 'A , B'"]), + ([" A", "B "], ["A", "B"], ["--location 'A' --location 'B'"]), + (None, None, []), + ], + ) + def test_s3_list_parsing_formatter(self, input, value, params): + load_params = LoadParams(LoadType.S3) + load_params.preset = Preset() + load_params.preset.s3_location = input + assert load_params.preset.s3_location == value + + self._check_preset_params(load_params, params) + + @pytest.mark.parametrize( + "load_type, input, value, params", + [ + (LoadType.gRPC, ["A C ", " B"], ["A C", "B"], [f"--rule 'A C' --rule 'B'"]), + (LoadType.gRPC, " A ", ["A"], ["--rule 'A'"]), + (LoadType.gRPC, " A , B ", ["A , B"], ["--rule 'A , B'"]), + (LoadType.gRPC, [" A", "B "], ["A", "B"], ["--rule 'A' --rule 'B'"]), + (LoadType.gRPC, None, None, []), + (LoadType.S3, ["A C ", " B"], ["A C", "B"], []), + (LoadType.S3, None, None, []), + ], + ) + def 
test_ape_list_parsing_formatter(self, load_type, input, value, params): + load_params = LoadParams(load_type) + load_params.preset = Preset() + load_params.preset.rule = input + assert load_params.preset.rule == value + + self._check_preset_params(load_params, params) + @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True) def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): expected_env_vars = { @@ -415,6 +469,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", + "--retry '0'", + "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -446,6 +502,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", + "--retry '0'", + "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -553,6 +611,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", + "--retry '0'", + "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -584,6 +644,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", + "--retry '0'", + "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -592,6 +654,7 @@ class TestLoadConfig: "--acl ''", ] expected_env_vars = { + "CONFIG_DIR": "", "CONFIG_FILE": "", "DURATION": 0, "WRITE_OBJ_SIZE": 0, @@ -599,6 +662,7 @@ class TestLoadConfig: "K6_OUT": "", "K6_MIN_ITERATION_DURATION": "", "K6_SETUP_TIMEOUT": "", + "MAX_TOTAL_SIZE_GB": 0, "WRITERS": 0, "READERS": 0, "DELETERS": 0, @@ -689,9 +753,7 @@ class TestLoadConfig: value = getattr(dataclass, field.name) assert value is not None, f"{field.name} is not None" - def _get_filled_load_params( - self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False - ) -> LoadParams: + def _get_filled_load_params(self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False) -> LoadParams: load_type_map = { LoadScenario.S3: LoadType.S3, LoadScenario.S3_CAR: LoadType.S3, @@ -708,13 +770,12 @@ class TestLoadConfig: meta_fields = self._get_meta_fields(load_params) for field in meta_fields: - if ( - getattr(field.instance, field.field.name) is None - and load_params.scenario in field.field.metadata["applicable_scenarios"] - ): + if getattr(field.instance, field.field.name) is None and load_params.scenario in field.field.metadata["applicable_scenarios"]: value_to_set_map = { int: 0 if set_emtpy else len(field.field.name), + float: 0 if set_emtpy else len(field.field.name), str: "" if set_emtpy else field.field.name, + list[str]: "" if set_emtpy else [field.field.name, f"{field.field.name}_2"], bool: False if set_emtpy else True, } value_to_set = value_to_set_map[field.field_type] @@ -727,11 +788,7 @@ class TestLoadConfig: def _get_meta_fields(self, instance): data_fields = fields(instance) - fields_with_data = [ - MetaTestField(field, self._get_actual_field_type(field), instance) - for field in data_fields - if field.metadata - ] + fields_with_data = [MetaTestField(field, self._get_actual_field_type(field), instance) for field in data_fields if field.metadata] for field in data_fields: actual_field_type = self._get_actual_field_type(field)
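To make the list-handling expectations in the tests above concrete, here is a small, self-contained sketch of the parse/format behaviour they assert (parse_list and format_repeated_option are hypothetical helper names used for illustration, not frostfs_testlib internals):

from typing import Optional


def parse_list(value: Optional[str | list[str]]) -> Optional[list[str]]:
    # None stays None; a bare string becomes a one-element list; items are only stripped, never split on commas.
    if value is None:
        return None
    if isinstance(value, str):
        value = [value]
    return [item.strip() for item in value]


def format_repeated_option(flag: str, values: Optional[list[str]]) -> str:
    # Every value is emitted as a separate flag occurrence, e.g. --policy 'A C' --policy 'B'.
    if not values:
        return ""
    return " ".join(f"--{flag} '{value}'" for value in values)


assert parse_list(["A C ", " B"]) == ["A C", "B"]
assert parse_list(" A , B ") == ["A , B"]
assert format_repeated_option("policy", ["A C", "B"]) == "--policy 'A C' --policy 'B'"
assert parse_list(None) is None and format_repeated_option("rule", None) == ""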