diff --git a/.devenv.hosting.yaml b/.devenv.hosting.yaml deleted file mode 100644 index f3b8c51..0000000 --- a/.devenv.hosting.yaml +++ /dev/null @@ -1,109 +0,0 @@ -hosts: -- address: localhost - hostname: localhost - attributes: - sudo_shell: false - plugin_name: docker - healthcheck_plugin_name: basic - attributes: - skip_readiness_check: True - force_transactions: True - services: - - name: frostfs-storage_01 - attributes: - container_name: s01 - config_path: /etc/frostfs/storage/config.yml - wallet_path: ../frostfs-dev-env/services/storage/wallet01.json - local_wallet_config_path: ./TemporaryDir/empty-password.yml - local_wallet_path: ../frostfs-dev-env/services/storage/wallet01.json - wallet_password: "" - volume_name: storage_storage_s01 - endpoint_data0: s01.frostfs.devenv:8080 - control_endpoint: s01.frostfs.devenv:8081 - un_locode: "RU MOW" - - name: frostfs-storage_02 - attributes: - container_name: s02 - config_path: /etc/frostfs/storage/config.yml - wallet_path: ../frostfs-dev-env/services/storage/wallet02.json - local_wallet_config_path: ./TemporaryDir/empty-password.yml - local_wallet_path: ../frostfs-dev-env/services/storage/wallet02.json - wallet_password: "" - volume_name: storage_storage_s02 - endpoint_data0: s02.frostfs.devenv:8080 - control_endpoint: s02.frostfs.devenv:8081 - un_locode: "RU LED" - - name: frostfs-storage_03 - attributes: - container_name: s03 - config_path: /etc/frostfs/storage/config.yml - wallet_path: ../frostfs-dev-env/services/storage/wallet03.json - local_wallet_config_path: ./TemporaryDir/empty-password.yml - local_wallet_path: ../frostfs-dev-env/services/storage/wallet03.json - wallet_password: "" - volume_name: storage_storage_s03 - endpoint_data0: s03.frostfs.devenv:8080 - control_endpoint: s03.frostfs.devenv:8081 - un_locode: "SE STO" - - name: frostfs-storage_04 - attributes: - container_name: s04 - config_path: /etc/frostfs/storage/config.yml - wallet_path: ../frostfs-dev-env/services/storage/wallet04.json - local_wallet_config_path: ./TemporaryDir/empty-password.yml - local_wallet_path: ../frostfs-dev-env/services/storage/wallet04.json - wallet_password: "" - volume_name: storage_storage_s04 - endpoint_data0: s04.frostfs.devenv:8080 - control_endpoint: s04.frostfs.devenv:8081 - un_locode: "FI HEL" - - name: frostfs-s3_01 - attributes: - container_name: s3_gate - config_path: ../frostfs-dev-env/services/s3_gate/.s3.env - wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json - local_wallet_config_path: ./TemporaryDir/password-s3.yml - local_wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json - wallet_password: "s3" - endpoint_data0: https://s3.frostfs.devenv:8080 - - name: frostfs-http_01 - attributes: - container_name: http_gate - config_path: ../frostfs-dev-env/services/http_gate/.http.env - wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json - local_wallet_config_path: ./TemporaryDir/password-other.yml - local_wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json - wallet_password: "one" - endpoint_data0: http://http.frostfs.devenv - - name: frostfs-ir_01 - attributes: - container_name: ir01 - config_path: ../frostfs-dev-env/services/ir/.ir.env - wallet_path: ../frostfs-dev-env/services/ir/az.json - local_wallet_config_path: ./TemporaryDir/password-other.yml - local_wallet_path: ../frostfs-dev-env/services/ir/az.json - wallet_password: "one" - - name: neo-go_01 - attributes: - container_name: morph_chain - config_path: ../frostfs-dev-env/services/morph_chain/protocol.privnet.yml - wallet_path: 
../frostfs-dev-env/services/morph_chain/node-wallet.json - local_wallet_config_path: ./TemporaryDir/password-other.yml - local_wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json - wallet_password: "one" - endpoint_internal0: http://morph-chain.frostfs.devenv:30333 - - name: main-chain_01 - attributes: - container_name: main_chain - config_path: ../frostfs-dev-env/services/chain/protocol.privnet.yml - wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json - local_wallet_config_path: ./TemporaryDir/password-other.yml - local_wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json - wallet_password: "one" - endpoint_internal0: http://main-chain.frostfs.devenv:30333 - - name: coredns_01 - attributes: - container_name: coredns - clis: - - name: frostfs-cli - exec_path: frostfs-cli diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml deleted file mode 100644 index 9aa0d31..0000000 --- a/.forgejo/workflows/dco.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: DCO action -on: [pull_request] - -jobs: - dco: - name: DCO - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: '1.21' - - - name: Run commit format checker - uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3 - with: - from: 'origin/${{ github.event.pull_request.base.ref }}' diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..1422062 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @aprasolova @vdomnich-yadro @dansingjulia @yadro-vavdeev @abereziny diff --git a/.github/workflows/dco.yml b/.github/workflows/dco.yml new file mode 100644 index 0000000..40ed8fc --- /dev/null +++ b/.github/workflows/dco.yml @@ -0,0 +1,21 @@ +name: DCO check + +on: + pull_request: + branches: + - master + +jobs: + commits_check_job: + runs-on: ubuntu-latest + name: Commits Check + steps: + - name: Get PR Commits + id: 'get-pr-commits' + uses: tim-actions/get-pr-commits@master + with: + token: ${{ secrets.GITHUB_TOKEN }} + - name: DCO Check + uses: tim-actions/dco@master + with: + commits: ${{ steps.get-pr-commits.outputs.commits }} diff --git a/.gitignore b/.gitignore index 4691fe4..a7f7de0 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,6 @@ # ignore IDE files .vscode .idea -venv.* # ignore temp files under any path .DS_Store @@ -11,4 +10,3 @@ venv.* /dist /build *.egg-info -wallet_config.yml \ No newline at end of file diff --git a/CODEOWNERS b/CODEOWNERS deleted file mode 100644 index 519ca42..0000000 --- a/CODEOWNERS +++ /dev/null @@ -1,3 +0,0 @@ -.* @TrueCloudLab/qa-committers -.forgejo/.* @potyarkin -Makefile @potyarkin diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 69417d2..5996820 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,8 +3,8 @@ First, thank you for contributing! We love and encourage pull requests from everyone. Please follow the guidelines: -- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/issues) and - [pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/pulls) for existing +- Check the open [issues](https://github.com/TrueCloudLab/frostfs-testlib/issues) and + [pull requests](https://github.com/TrueCloudLab/frostfs-testlib/pulls) for existing discussions. - Open an issue first, to discuss a new feature or enhancement. @@ -26,8 +26,8 @@ Start by forking the `frostfs-testlib` repository, make changes in a branch and send a pull request. 
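(One note before the steps: the `DCO check` workflow added above verifies, via `tim-actions/dco`, that every commit in a pull request carries a `Signed-off-by` trailer, so create your commits with `git commit -s` as you go.)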
We encourage pull requests to discuss code changes. Here are the steps in details: -### Set up your Git Repository -Fork [FrostFS testlib upstream](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/forks) source +### Set up your GitHub Repository +Fork [FrostFS testlib upstream](https://github.com/TrueCloudLab/frostfs-testlib/fork) source repository to your own personal repository. Copy the URL of your fork and clone it: ```shell @@ -37,7 +37,7 @@ $ git clone ### Set up git remote as ``upstream`` ```shell $ cd frostfs-testlib -$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-testlib +$ git remote add upstream https://github.com/TrueCloudLab/frostfs-testlib $ git fetch upstream ``` @@ -63,9 +63,9 @@ $ git checkout -b feature/123-something_awesome ``` ### Test your changes -Before submitting any changes to the library, please, make sure that linter and all unit tests are passing. To run the tests, please, use the following command: +Before submitting any changes to the library, please, make sure that all unit tests are passing. To run the tests, please, use the following command: ```shell -$ make validation +$ python -m unittest discover --start-directory tests ``` To enable tests that interact with SSH server, please, setup SSH server and set the following environment variables before running the tests: @@ -99,8 +99,8 @@ $ git push origin feature/123-something_awesome ``` ### Create a Pull Request -Pull requests can be created via Git. Refer to [this -document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for +Pull requests can be created via GitHub. Refer to [this +document](https://help.github.com/articles/creating-a-pull-request/) for detailed steps on how to create a pull request. After a Pull Request gets peer reviewed and approved, it will be merged. diff --git a/Makefile b/Makefile index 644eab0..c746608 100644 --- a/Makefile +++ b/Makefile @@ -1,11 +1,8 @@ SHELL := /bin/bash PYTHON_VERSION := 3.10 -VENV_NAME := frostfs-testlib -VENV_DIR := venv.${VENV_NAME} +VENV_DIR := venv.frostfs-testlib current_dir := $(shell pwd) -DIRECTORIES := $(sort $(dir $(wildcard ../frostfs-testlib-plugin-*/ ../*-testcases/))) -FROM_VENV := . ${VENV_DIR}/bin/activate && venv: create requirements paths precommit @echo Ready @@ -16,35 +13,15 @@ precommit: paths: @echo Append paths for project - @echo Virtual environment: ${current_dir}/${VENV_DIR} - @rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth - @touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth - @echo ${current_dir}/src | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + @echo Virtual environment: ${VENV_DIR} + @sudo rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + @sudo touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + @echo ${current_dir}/src/frostfs_testlib_frostfs_testlib | sudo tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth -create: ${VENV_DIR} - -${VENV_DIR}: - @echo Create virtual environment ${current_dir}/${VENV_DIR} - virtualenv --python=python${PYTHON_VERSION} --prompt=${VENV_NAME} ${VENV_DIR} +create: + @echo Create virtual environment for + virtualenv --python=python${PYTHON_VERSION} --prompt=frostfs-testlib ${VENV_DIR} requirements: @echo Isntalling pip requirements - . 
${VENV_DIR}/bin/activate && pip install -Ur requirements.txt - - -#### VALIDATION SECTION #### -lint: create requirements - ${FROM_VENV} pylint --disable R,C,W ./src - -unit_test: - @echo Starting unit tests - ${FROM_VENV} python -m pytest tests - -.PHONY: lint_dependent $(DIRECTORIES) -lint_dependent: $(DIRECTORIES) - -$(DIRECTORIES): - @echo checking dependent repo $@ - $(MAKE) validation -C $@ - -validation: lint unit_test lint_dependent \ No newline at end of file + . ${VENV_DIR}/bin/activate && pip install -Ur requirements.txt \ No newline at end of file diff --git a/README.md b/README.md index 2f8751f..c194df9 100644 --- a/README.md +++ b/README.md @@ -92,4 +92,4 @@ The library provides the following primary components: ## Contributing -Any contributions to the library should conform to the [contribution guideline](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/src/branch/master/CONTRIBUTING.md). +Any contributions to the library should conform to the [contribution guideline](https://github.com/TrueCloudLab/frostfs-testlib/blob/master/CONTRIBUTING.md). diff --git a/pyproject.toml b/pyproject.toml index 2778f8a..9140ee0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,17 +18,17 @@ keywords = ["frostfs", "test"] dependencies = [ "allure-python-commons>=2.13.2", "docker>=4.4.0", - "pyyaml==6.0.1", + "importlib_metadata>=5.0; python_version < '3.10'", "neo-mamba==1.0.0", "paramiko>=2.10.3", "pexpect>=4.8.0", - "requests==2.28.1", + "requests>=2.28.0", "docstring_parser>=0.15", "testrail-api>=1.12.0", "pytest==7.1.2", "tenacity==8.0.1", - "boto3==1.35.30", - "boto3-stubs[essential]==1.35.30", + "boto3==1.16.33", + "boto3-stubs[essential]==1.16.33", ] requires-python = ">=3.10" @@ -36,7 +36,7 @@ requires-python = ">=3.10" dev = ["black", "bumpver", "isort", "pre-commit"] [project.urls] -Homepage = "https://git.frostfs.info/TrueCloudLab/frostfs-testlib" +Homepage = "https://github.com/TrueCloudLab/frostfs-testlib" [project.entry-points."frostfs.testlib.reporter"] allure = "frostfs_testlib.reporter.allure_handler:AllureHandler" @@ -44,33 +44,13 @@ allure = "frostfs_testlib.reporter.allure_handler:AllureHandler" [project.entry-points."frostfs.testlib.hosting"] docker = "frostfs_testlib.hosting.docker_host:DockerHost" -[project.entry-points."frostfs.testlib.healthcheck"] -basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck" - -[project.entry-points."frostfs.testlib.csc_managers"] -config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager" - -[project.entry-points."frostfs.testlib.services"] -frostfs-storage = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode" -frostfs-s3 = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate" -frostfs-http = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate" -neo-go = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain" -frostfs-ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing" - -[project.entry-points."frostfs.testlib.credentials_providers"] -authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3CredentialsProvider" -wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider" - -[project.entry-points."frostfs.testlib.bucket_cid_resolver"] -frostfs = "frostfs_testlib.clients.s3.curl_bucket_resolver:CurlBucketContainerResolver" - [tool.isort] profile = "black" src_paths = ["src", "tests"] -line_length = 140 +line_length = 100 [tool.black] -line-length = 140 
+line-length = 100 target-version = ["py310"] [tool.bumpver] @@ -84,12 +64,3 @@ push = false [tool.bumpver.file_patterns] "pyproject.toml" = ['current_version = "{version}"', 'version = "{version}"'] "src/frostfs_testlib/__init__.py" = ["{version}"] - -[tool.pytest.ini_options] -filterwarnings = [ - "ignore:Blowfish has been deprecated:cryptography.utils.CryptographyDeprecationWarning", -] -testpaths = ["tests"] - -[project.entry-points.pytest11] -testlib = "frostfs_testlib" \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index a0bcc11..c653f7b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,6 @@ -allure-python-commons==2.13.2 +allure-python-commons==2.9.45 docker==4.4.0 +importlib_metadata==5.0.0 neo-mamba==1.0.0 paramiko==2.10.3 pexpect==4.8.0 @@ -8,16 +9,14 @@ docstring_parser==0.15 testrail-api==1.12.0 tenacity==8.0.1 pytest==7.1.2 -boto3==1.35.30 -boto3-stubs[essential]==1.35.30 -pydantic==2.10.6 +boto3==1.16.33 +boto3-stubs[essential]==1.16.33 # Dev dependencies black==22.8.0 bumpver==2022.1118 isort==5.12.0 pre-commit==2.20.0 -pylint==2.17.4 # Packaging dependencies build==0.8.0 diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index 4724a8b..159d48b 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1,4 +1 @@ __version__ = "2.0.1" - -from .fixtures import configure_testlib, hosting, session_start_time, temp_directory -from .hooks import pytest_add_frostfs_marker, pytest_collection_modifyitems diff --git a/src/frostfs_testlib/analytics/__init__.py b/src/frostfs_testlib/analytics/__init__.py index b057418..6995a08 100644 --- a/src/frostfs_testlib/analytics/__init__.py +++ b/src/frostfs_testlib/analytics/__init__.py @@ -1,5 +1,5 @@ from frostfs_testlib.analytics import test_case from frostfs_testlib.analytics.test_case import TestCasePriority from frostfs_testlib.analytics.test_collector import TestCase, TestCaseCollector -from frostfs_testlib.analytics.test_exporter import TСExporter +from frostfs_testlib.analytics.test_exporter import TestExporter from frostfs_testlib.analytics.testrail_exporter import TestrailExporter diff --git a/src/frostfs_testlib/analytics/test_collector.py b/src/frostfs_testlib/analytics/test_collector.py index 56ee606..0f5398e 100644 --- a/src/frostfs_testlib/analytics/test_collector.py +++ b/src/frostfs_testlib/analytics/test_collector.py @@ -6,7 +6,6 @@ from docstring_parser.google import DEFAULT_SECTIONS, Section, SectionType DEFAULT_SECTIONS.append(Section("Steps", "steps", SectionType.MULTIPLE)) - class TestCase: """ Test case object implementation for use in collector and exporters @@ -107,9 +106,7 @@ class TestCaseCollector: # Read test_case suite and section name from test class if possible and get test function from class if test.cls: suite_name = test.cls.__dict__.get("__test_case_suite_name__", suite_name) - suite_section_name = test.cls.__dict__.get( - "__test_case_suite_section__", suite_section_name - ) + suite_section_name = test.cls.__dict__.get("__test_case_suite_section__", suite_section_name) test_function = test.cls.__dict__[test.originalname] else: # If no test class, read test function from module @@ -120,9 +117,7 @@ class TestCaseCollector: test_case_title = test_function.__dict__.get("__test_case_title__", None) test_case_priority = test_function.__dict__.get("__test_case_priority__", None) suite_name = test_function.__dict__.get("__test_case_suite_name__", suite_name) - suite_section_name = test_function.__dict__.get( - 
"__test_case_suite_section__", suite_section_name - ) + suite_section_name = test_function.__dict__.get("__test_case_suite_section__", suite_section_name) # Parce test_steps if they define in __doc__ doc_string = parse(test_function.__doc__, style=DocstringStyle.GOOGLE) @@ -130,9 +125,7 @@ class TestCaseCollector: if doc_string.short_description: test_case_description = doc_string.short_description if doc_string.long_description: - test_case_description = ( - f"{doc_string.short_description}\r\n{doc_string.long_description}" - ) + test_case_description = f"{doc_string.short_description}\r\n{doc_string.long_description}" if doc_string.meta: for meta in doc_string.meta: @@ -147,27 +140,25 @@ class TestCaseCollector: test_case_params = test_case_call_spec.id # Format title with params if test_case_title: - test_case_title = self.__format_string_with_params__( - test_case_title, test_case_call_spec.params - ) + test_case_title = self.__format_string_with_params__(test_case_title,test_case_call_spec.params) # Format steps with params if test_case_steps: for key, value in test_case_steps.items(): - value = self.__format_string_with_params__(value, test_case_call_spec.params) + value = self.__format_string_with_params__(value,test_case_call_spec.params) test_case_steps[key] = value # If there is set basic test case attributes create TestCase and return if test_case_id and test_case_title and suite_name and suite_name: test_case = TestCase( - uuid_id=test_case_id, - title=test_case_title, - description=test_case_description, - priority=test_case_priority, - steps=test_case_steps, - params=test_case_params, - suite_name=suite_name, - suite_section_name=suite_section_name, - ) + id=test_case_id, + title=test_case_title, + description=test_case_description, + priority=test_case_priority, + steps=test_case_steps, + params=test_case_params, + suite_name=suite_name, + suite_section_name=suite_section_name, + ) return test_case # Return None if there is no enough information for return test case return None @@ -196,4 +187,4 @@ class TestCaseCollector: test_case = self.__get_test_case_from_pytest_test__(test) if test_case: test_cases.append(test_case) - return test_cases + return test_cases \ No newline at end of file diff --git a/src/frostfs_testlib/analytics/test_exporter.py b/src/frostfs_testlib/analytics/test_exporter.py index dd6a7fb..263995c 100644 --- a/src/frostfs_testlib/analytics/test_exporter.py +++ b/src/frostfs_testlib/analytics/test_exporter.py @@ -3,8 +3,7 @@ from abc import ABC, abstractmethod from frostfs_testlib.analytics.test_collector import TestCase -# TODO: REMOVE ME -class TСExporter(ABC): +class TestExporter(ABC): test_cases_cache = [] test_suites_cache = [] @@ -47,7 +46,9 @@ class TСExporter(ABC): """ @abstractmethod - def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None: + def update_test_case( + self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section + ) -> None: """ Update test case in TMS """ @@ -59,11 +60,13 @@ class TСExporter(ABC): for test_case in test_cases: test_suite = self.get_or_create_test_suite(test_case.suite_name) - test_section = self.get_or_create_suite_section(test_suite, test_case.suite_section_name) + test_section = self.get_or_create_suite_section( + test_suite, test_case.suite_section_name + ) test_case_in_tms = self.search_test_case_id(test_case.id) steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()] if test_case_in_tms: - 
self.update_test_case(test_case, test_case_in_tms, test_suite, test_section) + self.update_test_case(test_case, test_case_in_tms) else: - self.create_test_case(test_case, test_suite, test_section) + self.create_test_case(test_case) diff --git a/src/frostfs_testlib/analytics/testrail_exporter.py b/src/frostfs_testlib/analytics/testrail_exporter.py index 36c482c..610fee5 100644 --- a/src/frostfs_testlib/analytics/testrail_exporter.py +++ b/src/frostfs_testlib/analytics/testrail_exporter.py @@ -1,10 +1,10 @@ from testrail_api import TestRailAPI from frostfs_testlib.analytics.test_collector import TestCase -from frostfs_testlib.analytics.test_exporter import TСExporter +from frostfs_testlib.analytics.test_exporter import TestExporter -class TestrailExporter(TСExporter): +class TestrailExporter(TestExporter): def __init__( self, tr_url: str, @@ -62,13 +62,19 @@ class TestrailExporter(TСExporter): It's help do not call TMS each time then we search test case """ for test_suite in self.test_suites_cache: - self.test_cases_cache.extend(self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"])) + self.test_cases_cache.extend( + self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"]) + ) def search_test_case_id(self, test_case_id: str) -> object: """ Find test cases in TestRail (cache) by ID """ - test_cases = [test_case for test_case in self.test_cases_cache if test_case["custom_autotest_name"] == test_case_id] + test_cases = [ + test_case + for test_case in self.test_cases_cache + if test_case["custom_autotest_name"] == test_case_id + ] if len(test_cases) > 1: raise RuntimeError(f"Too many results found in test rail for id {test_case_id}") @@ -81,7 +87,9 @@ class TestrailExporter(TСExporter): """ Get suite name with exact name from Testrail or create if not exist """ - test_rail_suites = [suite for suite in self.test_suites_cache if suite["name"] == test_suite_name] + test_rail_suites = [ + suite for suite in self.test_suites_cache if suite["name"] == test_suite_name + ] if not test_rail_suites: test_rail_suite = self.api.suites.add_suite( @@ -94,13 +102,17 @@ class TestrailExporter(TСExporter): elif len(test_rail_suites) == 1: return test_rail_suites.pop() else: - raise RuntimeError(f"Too many results found in test rail for suite name {test_suite_name}") + raise RuntimeError( + f"Too many results found in test rail for suite name {test_suite_name}" + ) def get_or_create_suite_section(self, test_rail_suite, section_name) -> object: """ Get suite section with exact name from Testrail or create new one if not exist """ - test_rail_sections = [section for section in test_rail_suite["sections"] if section["name"] == section_name] + test_rail_sections = [ + section for section in test_rail_suite["sections"] if section["name"] == section_name + ] if not test_rail_sections: test_rail_section = self.api.sections.add_section( @@ -116,7 +128,9 @@ class TestrailExporter(TСExporter): elif len(test_rail_sections) == 1: return test_rail_sections.pop() else: - raise RuntimeError(f"Too many results found in test rail for section name {section_name}") + raise RuntimeError( + f"Too many results found in test rail for section name {section_name}" + ) def prepare_request_body(self, test_case: TestCase, test_suite, test_suite_section) -> dict: """ @@ -150,7 +164,9 @@ class TestrailExporter(TСExporter): self.api.cases.add_case(**request_body) - def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None: + def update_test_case( + self, 
test_case: TestCase, test_case_in_tms, test_suite, test_suite_section + ) -> None: """ Update test case in Testrail """ diff --git a/src/frostfs_testlib/cli/__init__.py b/src/frostfs_testlib/cli/__init__.py index 7e3d243..3799be9 100644 --- a/src/frostfs_testlib/cli/__init__.py +++ b/src/frostfs_testlib/cli/__init__.py @@ -1,5 +1,4 @@ from frostfs_testlib.cli.frostfs_adm import FrostfsAdm from frostfs_testlib.cli.frostfs_authmate import FrostfsAuthmate from frostfs_testlib.cli.frostfs_cli import FrostfsCli -from frostfs_testlib.cli.generic_cli import GenericCli from frostfs_testlib.cli.neogo import NeoGo, NetworkType diff --git a/src/frostfs_testlib/cli/cli_command.py b/src/frostfs_testlib/cli/cli_command.py index 224e9e3..3600e77 100644 --- a/src/frostfs_testlib/cli/cli_command.py +++ b/src/frostfs_testlib/cli/cli_command.py @@ -1,11 +1,10 @@ from typing import Optional from frostfs_testlib.shell import CommandOptions, CommandResult, InteractiveInput, Shell -from frostfs_testlib.utils.datetime_utils import parse_time class CliCommand: - TIMEOUT_INACCURACY = 10 + WALLET_SOURCE_ERROR_MSG = "Provide either wallet or wallet_config to specify wallet location" WALLET_PASSWD_ERROR_MSG = "Provide either wallet_password or wallet_config to specify password" @@ -25,7 +24,9 @@ class CliCommand: def __init__(self, shell: Shell, cli_exec_path: str, **base_params): self.shell = shell self.cli_exec_path = cli_exec_path - self.__base_params = " ".join([f"--{param} {value}" for param, value in base_params.items() if value]) + self.__base_params = " ".join( + [f"--{param} {value}" for param, value in base_params.items() if value] + ) def _format_command(self, command: str, **params) -> str: param_str = [] @@ -47,7 +48,9 @@ class CliCommand: val_str = str(value_item).replace("'", "\\'") param_str.append(f"--{param} '{val_str}'") elif isinstance(value, dict): - param_str.append(f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'') + param_str.append( + f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'' + ) else: if "'" in str(value): value_str = str(value).replace('"', '\\"') @@ -60,22 +63,12 @@ class CliCommand: return f"{self.cli_exec_path} {self.__base_params} {command or ''} {param_str}" def _execute(self, command: Optional[str], **params) -> CommandResult: - if timeout := params.get("timeout"): - timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY - - return self.shell.exec( - self._format_command(command, **params), - CommandOptions(timeout=timeout), - ) + return self.shell.exec(self._format_command(command, **params)) def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult: - if timeout := params.get("timeout"): - timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY - return self.shell.exec( self._format_command(command, **params), - CommandOptions( - interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)], - timeout=timeout, + options=CommandOptions( + interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)] ), ) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index bdf4a91..3faa875 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -27,7 +27,11 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph deposit-notary", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, + **{ + param: param_value 
+ for param, param_value in locals().items() + if param not in ["self"] + }, ) def dump_balances( @@ -52,7 +56,11 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph dump-balances", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, ) def dump_config(self, rpc_endpoint: str) -> CommandResult: @@ -66,23 +74,11 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph dump-config", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, - ) - - def set_config(self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult: - """Add/update global config value in the FrostFS network. - - Args: - set_key_value: key1=val1 [key2=val2 ...] - alphabet_wallets: Path to alphabet wallets dir - rpc_endpoint: N3 RPC node endpoint - - Returns: - Command's result. - """ - return self._execute( - f"morph set-config {set_key_value}", - **{param: param_value for param, param_value in locals().items() if param not in ["self", "set_key_value"]}, + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, ) def dump_containers( @@ -105,10 +101,14 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph dump-containers", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, ) - def dump_hashes(self, rpc_endpoint: str, domain: Optional[str] = None) -> CommandResult: + def dump_hashes(self, rpc_endpoint: str) -> CommandResult: """Dump deployed contract hashes. Args: @@ -119,11 +119,15 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph dump-hashes", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, ) def force_new_epoch( - self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None + self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None ) -> CommandResult: """Create new FrostFS epoch event in the side chain. @@ -136,7 +140,11 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph force-new-epoch", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, ) def generate_alphabet( @@ -157,7 +165,11 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph generate-alphabet", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, ) def generate_storage_wallet( @@ -180,7 +192,11 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph generate-storage-wallet", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, ) def init( @@ -203,7 +219,7 @@ class FrostfsAdmMorph(CliCommand): container_alias_fee: Container alias fee (default 500). 
container_fee: Container registration fee (default 1000). contracts: Path to archive with compiled FrostFS contracts - (default fetched from latest git release). + (default fetched from latest github release). epoch_duration: Amount of side chain blocks in one FrostFS epoch (default 240). homomorphic_disabled: Disable object homomorphic hashing. local_dump: Path to the blocks dump file. @@ -216,7 +232,11 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph init", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, ) def refill_gas( @@ -239,7 +259,11 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph refill-gas", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, ) def restore_containers( @@ -262,7 +286,11 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph restore-containers", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, ) def set_policy( @@ -312,7 +340,7 @@ class FrostfsAdmMorph(CliCommand): Args: alphabet_wallets: Path to alphabet wallets dir. contracts: Path to archive with compiled FrostFS contracts - (default fetched from latest git release). + (default fetched from latest github release). rpc_endpoint: N3 RPC node endpoint. Returns: @@ -320,13 +348,17 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph update-contracts", - **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, ) def remove_nodes( self, node_netmap_keys: list[str], rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None ) -> CommandResult: - """Move node to the Offline state in the candidates list + """ Move node to the Offline state in the candidates list and tick an epoch to update the netmap using frostfs-adm Args: @@ -339,150 +371,12 @@ class FrostfsAdmMorph(CliCommand): """ if not len(node_netmap_keys): raise AttributeError("Got empty node_netmap_keys list") - + return self._execute( f"morph remove-nodes {' '.join(node_netmap_keys)}", - **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, - ) - - def add_rule( - self, - chain_id: str, - target_name: str, - target_type: str, - rule: Optional[list[str]] = None, - path: Optional[str] = None, - chain_id_hex: Optional[bool] = None, - chain_name: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - chain-id: Assign ID to the parsed chain - chain-id-hex: Flag to parse chain ID as hex - path: Path to encoded chain in JSON or binary format - rule: Rule statement - target-name: Resource name in APE resource name format - target-type: Resource type(container/namespace) - timeout: Timeout for an operation (default 15s) - wallet: Path to the wallet or binary key - - Returns: - Command`s result. 
- """ - return self._execute( - "morph ape add-rule-chain", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def get_rule( - self, - chain_id: str, - target_name: str, - target_type: str, - chain_id_hex: Optional[bool] = None, - chain_name: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - chain-id string Chain id - chain-id-hex Flag to parse chain ID as hex - target-name string Resource name in APE resource name format - target-type string Resource type(container/namespace) - timeout duration Timeout for an operation (default 15s) - wallet string Path to the wallet or binary key - - Returns: - Command`s result. - """ - return self._execute( - "morph ape get-rule-chain", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def list_rules( - self, - target_type: str, - target_name: Optional[str] = None, - rpc_endpoint: Optional[str] = None, - chain_name: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - target-name: Resource name in APE resource name format - target-type: Resource type(container/namespace) - timeout: Timeout for an operation (default 15s) - wallet: Path to the wallet or binary key - - Returns: - Command`s result. - """ - return self._execute( - "morph ape list-rule-chains", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def remove_rule( - self, - chain_id: str, - target_name: str, - target_type: str, - all: Optional[bool] = None, - chain_name: Optional[str] = None, - chain_id_hex: Optional[bool] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - all: Remove all chains - chain-id: Assign ID to the parsed chain - chain-id-hex: Flag to parse chain ID as hex - target-name: Resource name in APE resource name format - target-type: Resource type(container/namespace) - timeout: Timeout for an operation (default 15s) - wallet: Path to the wallet or binary key - - Returns: - Command`s result. 
- """ - return self._execute( - "morph ape rm-rule-chain", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def get_nns_records( - self, - name: str, - type: Optional[str] = None, - rpc_endpoint: Optional[str] = None, - alphabet_wallets: Optional[str] = None, - ) -> CommandResult: - """Returns domain record of the specified type - - Args: - name: Domain name - type: Domain name service record type(A|CNAME|SOA|TXT) - rpc_endpoint: N3 RPC node endpoint - alphabet_wallets: path to alphabet wallets dir - - Returns: - Command's result - """ - return self._execute( - "morph nns get-records", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self", "node_netmap_keys"] + }, + ) \ No newline at end of file diff --git a/src/frostfs_testlib/cli/frostfs_authmate/authmate.py b/src/frostfs_testlib/cli/frostfs_authmate/authmate.py index 7912dae..ba3a3b0 100644 --- a/src/frostfs_testlib/cli/frostfs_authmate/authmate.py +++ b/src/frostfs_testlib/cli/frostfs_authmate/authmate.py @@ -6,8 +6,8 @@ from frostfs_testlib.shell import Shell class FrostfsAuthmate: - secret: FrostfsAuthmateSecret - version: FrostfsAuthmateVersion + secret: Optional[FrostfsAuthmateSecret] = None + version: Optional[FrostfsAuthmateVersion] = None def __init__(self, shell: Shell, frostfs_authmate_exec_path: str): self.secret = FrostfsAuthmateSecret(shell, frostfs_authmate_exec_path) diff --git a/src/frostfs_testlib/cli/frostfs_authmate/secret.py b/src/frostfs_testlib/cli/frostfs_authmate/secret.py index 5f300bc..ba5b5f5 100644 --- a/src/frostfs_testlib/cli/frostfs_authmate/secret.py +++ b/src/frostfs_testlib/cli/frostfs_authmate/secret.py @@ -44,6 +44,7 @@ class FrostfsAuthmateSecret(CliCommand): wallet: str, wallet_password: str, peer: str, + bearer_rules: str, gate_public_key: Union[str, list[str]], address: Optional[str] = None, container_id: Optional[str] = None, diff --git a/src/frostfs_testlib/cli/frostfs_cli/acl.py b/src/frostfs_testlib/cli/frostfs_cli/acl.py index 3e60582..bd0f80e 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/acl.py +++ b/src/frostfs_testlib/cli/frostfs_cli/acl.py @@ -22,7 +22,7 @@ class FrostfsCliACL(CliCommand): Well-known system object headers start with '$Object:' prefix. User defined headers start without prefix. Read more about filter keys at: - https://git.frostfs.info/TrueCloudLab/frostfs-api/src/branch/master/proto-docs/acl.md#message-eaclrecord-filter + http://github.com/TrueCloudLab/frostfs-api/blob/master/proto-docs/acl.md#message-eaclrecordfilter Match is '=' for matching and '!=' for non-matching filter. Value is a valid unicode string corresponding to object or request header value. 
diff --git a/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py b/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py deleted file mode 100644 index 525a9be..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py +++ /dev/null @@ -1,70 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliApeManager(CliCommand): - """Operations with APE manager.""" - - def add( - self, - rpc_endpoint: str, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - path: Optional[str] = None, - rule: Optional[str] | Optional[list[str]] = None, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Add rule chain for a target.""" - - return self._execute( - "ape-manager add", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def list( - self, - rpc_endpoint: str, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Generate APE override by target and APE chains. Util command. - - Generated APE override can be dumped to a file in JSON format that is passed to - "create" command. - """ - - return self._execute( - "ape-manager list", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def remove( - self, - rpc_endpoint: str, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - target_name: Optional[str] = None, - target_type: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Generate APE override by target and APE chains. Util command. - - Generated APE override can be dumped to a file in JSON format that is passed to - "create" command. - """ - - return self._execute( - "ape-manager remove", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/bearer.py b/src/frostfs_testlib/cli/frostfs_cli/bearer.py deleted file mode 100644 index e21a6c8..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/bearer.py +++ /dev/null @@ -1,54 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliBearer(CliCommand): - def create( - self, - rpc_endpoint: str, - out: str, - issued_at: Optional[str] = None, - expire_at: Optional[str] = None, - not_valid_before: Optional[str] = None, - ape: Optional[str] = None, - eacl: Optional[str] = None, - owner: Optional[str] = None, - json: Optional[bool] = False, - impersonate: Optional[bool] = False, - wallet: Optional[str] = None, - address: Optional[str] = None, - ) -> CommandResult: - """Create bearer token. - - All epoch flags can be specified relative to the current epoch with the +n syntax. - In this case --rpc-endpoint flag should be specified and the epoch in bearer token - is set to current epoch + n. 
- """ - return self._execute( - "bearer create", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def generate_ape_override( - self, - chain_id: Optional[str] = None, - chain_id_hex: Optional[str] = None, - cid: Optional[str] = None, - output: Optional[str] = None, - path: Optional[str] = None, - rule: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - ) -> CommandResult: - """Generate APE override by target and APE chains. Util command. - - Generated APE override can be dumped to a file in JSON format that is passed to - "create" command. - """ - - return self._execute( - "bearer generate-ape-override", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/cli.py b/src/frostfs_testlib/cli/frostfs_cli/cli.py index d83b7ae..5d55f55 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/cli.py +++ b/src/frostfs_testlib/cli/frostfs_cli/cli.py @@ -2,16 +2,12 @@ from typing import Optional from frostfs_testlib.cli.frostfs_cli.accounting import FrostfsCliAccounting from frostfs_testlib.cli.frostfs_cli.acl import FrostfsCliACL -from frostfs_testlib.cli.frostfs_cli.ape_manager import FrostfsCliApeManager -from frostfs_testlib.cli.frostfs_cli.bearer import FrostfsCliBearer from frostfs_testlib.cli.frostfs_cli.container import FrostfsCliContainer -from frostfs_testlib.cli.frostfs_cli.control import FrostfsCliControl from frostfs_testlib.cli.frostfs_cli.netmap import FrostfsCliNetmap from frostfs_testlib.cli.frostfs_cli.object import FrostfsCliObject from frostfs_testlib.cli.frostfs_cli.session import FrostfsCliSession from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards from frostfs_testlib.cli.frostfs_cli.storagegroup import FrostfsCliStorageGroup -from frostfs_testlib.cli.frostfs_cli.tree import FrostfsCliTree from frostfs_testlib.cli.frostfs_cli.util import FrostfsCliUtil from frostfs_testlib.cli.frostfs_cli.version import FrostfsCliVersion from frostfs_testlib.shell import Shell @@ -28,7 +24,6 @@ class FrostfsCli: storagegroup: FrostfsCliStorageGroup util: FrostfsCliUtil version: FrostfsCliVersion - control: FrostfsCliControl def __init__(self, shell: Shell, frostfs_cli_exec_path: str, config_file: Optional[str] = None): self.accounting = FrostfsCliAccounting(shell, frostfs_cli_exec_path, config=config_file) @@ -41,7 +36,3 @@ class FrostfsCli: self.storagegroup = FrostfsCliStorageGroup(shell, frostfs_cli_exec_path, config=config_file) self.util = FrostfsCliUtil(shell, frostfs_cli_exec_path, config=config_file) self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file) - self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file) - self.control = FrostfsCliControl(shell, frostfs_cli_exec_path, config=config_file) - self.bearer = FrostfsCliBearer(shell, frostfs_cli_exec_path, config=config_file) - self.ape_manager = FrostfsCliApeManager(shell, frostfs_cli_exec_path, config=config_file) diff --git a/src/frostfs_testlib/cli/frostfs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py index 8bcbe9e..533ff1a 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/container.py +++ b/src/frostfs_testlib/cli/frostfs_cli/container.py @@ -8,16 +8,12 @@ class FrostfsCliContainer(CliCommand): def create( self, rpc_endpoint: str, - wallet: Optional[str] = None, - nns_zone: Optional[str] = None, - nns_name: Optional[str] = None, + wallet: str, address: Optional[str] = None, attributes: 
Optional[dict] = None, basic_acl: Optional[str] = None, await_mode: bool = False, disable_timestamp: bool = False, - force: bool = False, - trace: bool = False, name: Optional[str] = None, nonce: Optional[str] = None, policy: Optional[str] = None, @@ -39,8 +35,6 @@ class FrostfsCliContainer(CliCommand): basic_acl: Hex encoded basic ACL value or keywords like 'public-read-write', 'private', 'eacl-public-read' (default "private"). disable_timestamp: Disable timestamp container attribute. - force: Skip placement validity check. - trace: Generate trace ID and print it. name: Container name attribute. nonce: UUIDv4 nonce value for container. policy: QL-encoded or JSON-encoded placement policy or path to file with it. @@ -51,8 +45,6 @@ class FrostfsCliContainer(CliCommand): wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). - nns_zone: Container nns zone attribute. - nns_name: Container nns name attribute. Returns: Command's result. @@ -65,15 +57,15 @@ class FrostfsCliContainer(CliCommand): def delete( self, rpc_endpoint: str, + wallet: str, cid: str, - wallet: Optional[str] = None, address: Optional[str] = None, await_mode: bool = False, session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, force: bool = False, - trace: bool = False, + timeout: Optional[str] = None, ) -> CommandResult: """ Delete an existing container. @@ -83,13 +75,13 @@ class FrostfsCliContainer(CliCommand): address: Address of wallet account. await_mode: Block execution until container is removed. cid: Container ID. - trace: Generate trace ID and print it. force: Do not check whether container contains locks and remove immediately. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). Returns: Command's result. @@ -103,14 +95,12 @@ class FrostfsCliContainer(CliCommand): def get( self, rpc_endpoint: str, + wallet: str, cid: str, - wallet: Optional[str] = None, address: Optional[str] = None, - generate_key: Optional[bool] = None, await_mode: bool = False, to: Optional[str] = None, json_mode: bool = False, - trace: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, @@ -123,14 +113,12 @@ class FrostfsCliContainer(CliCommand): await_mode: Block execution until container is removed. cid: Container ID. json_mode: Print or dump container in JSON format. - trace: Generate trace ID and print it. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). to: Path to dump encoded container. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). - generate_key: Generate a new private key. Returns: Command's result. @@ -143,10 +131,9 @@ class FrostfsCliContainer(CliCommand): def get_eacl( self, rpc_endpoint: str, + wallet: str, cid: str, - wallet: Optional[str] = None, address: Optional[str] = None, - generate_key: Optional[bool] = None, await_mode: bool = False, to: Optional[str] = None, session: Optional[str] = None, @@ -163,14 +150,11 @@ class FrostfsCliContainer(CliCommand): cid: Container ID. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). 
to: Path to dump encoded container. - json_mode: Print or dump container in JSON format. - trace: Generate trace ID and print it. session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). - generate_key: Generate a new private key. Returns: Command's result. @@ -184,10 +168,8 @@ class FrostfsCliContainer(CliCommand): def list( self, rpc_endpoint: str, - name: Optional[str] = None, - wallet: Optional[str] = None, + wallet: str, address: Optional[str] = None, - generate_key: Optional[bool] = None, owner: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -199,15 +181,12 @@ class FrostfsCliContainer(CliCommand): Args: address: Address of wallet account. - name: List containers by the attribute name. owner: Owner of containers (omit to use owner from private key). rpc_endpoint: Remote node address (as 'multiaddr' or ':'). ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. - trace: Generate trace ID and print it. timeout: Timeout for the operation (default 15s). - generate_key: Generate a new private key. Returns: Command's result. @@ -220,12 +199,9 @@ class FrostfsCliContainer(CliCommand): def list_objects( self, rpc_endpoint: str, + wallet: str, cid: str, - bearer: Optional[str] = None, - wallet: Optional[str] = None, address: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, @@ -236,14 +212,11 @@ class FrostfsCliContainer(CliCommand): Args: address: Address of wallet account. cid: Container ID. - bearer: File with signed JSON or binary encoded bearer token. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. - trace: Generate trace ID and print it. timeout: Timeout for the operation (default 15s). - generate_key: Generate a new private key. Returns: Command's result. @@ -253,12 +226,11 @@ class FrostfsCliContainer(CliCommand): **{param: value for param, value in locals().items() if param not in ["self"]}, ) - # TODO Deprecated method with 0.42 def set_eacl( self, rpc_endpoint: str, + wallet: str, cid: str, - wallet: Optional[str] = None, address: Optional[str] = None, await_mode: bool = False, table: Optional[str] = None, @@ -290,43 +262,3 @@ class FrostfsCliContainer(CliCommand): "container set-eacl", **{param: value for param, value in locals().items() if param not in ["self"]}, ) - - def search_node( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - ttl: Optional[int] = None, - from_file: Optional[str] = None, - trace: bool = False, - short: Optional[bool] = True, - xhdr: Optional[dict] = None, - generate_key: Optional[bool] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Show the nodes participating in the container in the current epoch. - - Args: - rpc_endpoint: string Remote host address (as 'multiaddr' or ':') - wallet: WIF (NEP-2) string or path to the wallet or binary key. - cid: Container ID. - address: Address of wallet account. - ttl: TTL value in request meta header (default 2). 
- from_file: string File path with encoded container - timeout: duration Timeout for the operation (default 15 s) - short: shorten the output of node information. - trace: Generate trace ID and print it. - xhdr: Dict with request X-Headers. - generate_key: Generate a new private key. - - Returns: - - """ - from_str = f"--from {from_file}" if from_file else "" - - return self._execute( - f"container nodes {from_str}", - **{param: value for param, value in locals().items() if param not in ["self", "from_file", "from_str"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/control.py b/src/frostfs_testlib/cli/frostfs_cli/control.py deleted file mode 100644 index 957bca9..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/control.py +++ /dev/null @@ -1,232 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliControl(CliCommand): - def set_status( - self, - endpoint: str, - status: str, - wallet: Optional[str] = None, - force: Optional[bool] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Set status of the storage node in FrostFS network map - - Args: - wallet: Path to the wallet or binary key - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') - force: Force turning to local maintenance - status: New netmap status keyword ('online', 'offline', 'maintenance') - timeout: Timeout for an operation (default 15s) - - Returns: - Command`s result. - """ - return self._execute( - "control set-status", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def healthcheck( - self, - endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Health check for FrostFS storage nodes - - Args: - wallet: Path to the wallet or binary key - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') - timeout: Timeout for an operation (default 15s) - - Returns: - Command`s result. - """ - return self._execute( - "control healthcheck", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def drop_objects( - self, - endpoint: str, - objects: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - wallet: Path to the wallet or binary key - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') - objects: List of object addresses to be removed in string format - timeout: Timeout for an operation (default 15s) - - Returns: - Command`s result. 
- """ - return self._execute( - "control drop-objects", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def add_rule( - self, - endpoint: str, - chain_id: str, - target_name: str, - target_type: str, - rule: Optional[list[str]] = None, - path: Optional[str] = None, - chain_id_hex: Optional[bool] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - address: Address of wallet account - chain-id: Assign ID to the parsed chain - chain-id-hex: Flag to parse chain ID as hex - endpoint: Remote node control address (as 'multiaddr' or ':') - path: Path to encoded chain in JSON or binary format - rule: Rule statement - target-name: Resource name in APE resource name format - target-type: Resource type(container/namespace) - timeout: Timeout for an operation (default 15s) - wallet: Path to the wallet or binary key - - Returns: - Command`s result. - """ - return self._execute( - "control add-rule", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def get_rule( - self, - endpoint: str, - chain_id: str, - target_name: str, - target_type: str, - chain_id_hex: Optional[bool] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - address string Address of wallet account - chain-id string Chain id - chain-id-hex Flag to parse chain ID as hex - endpoint string Remote node control address (as 'multiaddr' or ':') - target-name string Resource name in APE resource name format - target-type string Resource type(container/namespace) - timeout duration Timeout for an operation (default 15s) - wallet string Path to the wallet or binary key - - Returns: - Command`s result. - """ - return self._execute( - "control get-rule", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def list_rules( - self, - endpoint: str, - target_name: str, - target_type: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') - target-name: Resource name in APE resource name format - target-type: Resource type(container/namespace) - timeout: Timeout for an operation (default 15s) - wallet: Path to the wallet or binary key - - Returns: - Command`s result. - """ - return self._execute( - "control list-rules", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def list_targets( - self, - endpoint: str, - chain_name: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - address: Address of wallet account - chain-name: Chain name(ingress|s3) - endpoint: Remote node control address (as 'multiaddr' or ':') - timeout: Timeout for an operation (default 15s) - wallet: Path to the wallet or binary key - - Returns: - Command`s result. 
- """ - return self._execute( - "control list-targets", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def remove_rule( - self, - endpoint: str, - chain_id: str, - target_name: str, - target_type: str, - all: Optional[bool] = None, - chain_id_hex: Optional[bool] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Drop objects from the node's local storage - - Args: - address: Address of wallet account - all: Remove all chains - chain-id: Assign ID to the parsed chain - chain-id-hex: Flag to parse chain ID as hex - endpoint: Remote node control address (as 'multiaddr' or ':') - target-name: Resource name in APE resource name format - target-type: Resource type(container/namespace) - timeout: Timeout for an operation (default 15s) - wallet: Path to the wallet or binary key - - Returns: - Command`s result. - """ - return self._execute( - "control remove-rule", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/netmap.py b/src/frostfs_testlib/cli/frostfs_cli/netmap.py index cd197d3..8920893 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/netmap.py +++ b/src/frostfs_testlib/cli/frostfs_cli/netmap.py @@ -8,11 +8,10 @@ class FrostfsCliNetmap(CliCommand): def epoch( self, rpc_endpoint: str, - wallet: Optional[str] = None, + wallet: str, address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, - trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -39,11 +38,10 @@ class FrostfsCliNetmap(CliCommand): def netinfo( self, rpc_endpoint: str, - wallet: Optional[str] = None, + wallet: str, address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, - trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -70,12 +68,11 @@ class FrostfsCliNetmap(CliCommand): def nodeinfo( self, rpc_endpoint: str, - wallet: Optional[str] = None, + wallet: str, address: Optional[str] = None, generate_key: bool = False, json: bool = False, ttl: Optional[int] = None, - trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -103,11 +100,10 @@ class FrostfsCliNetmap(CliCommand): def snapshot( self, rpc_endpoint: str, - wallet: Optional[str] = None, + wallet: str, address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, - trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index e536544..1c1d0ac 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -8,12 +8,11 @@ class FrostfsCliObject(CliCommand): def delete( self, rpc_endpoint: str, + wallet: str, cid: str, oid: str, - wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, - generate_key: Optional[bool] = None, session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -26,7 +25,6 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. - generate_key: Generate new private key. oid: Object ID. 
rpc_endpoint: Remote node address (as 'multiaddr' or ':'). session: Filepath to a JSON- or binary-encoded token of the object DELETE session. @@ -46,12 +44,11 @@ class FrostfsCliObject(CliCommand): def get( self, rpc_endpoint: str, + wallet: str, cid: str, oid: str, - wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, - generate_key: Optional[bool] = None, file: Optional[str] = None, header: Optional[str] = None, no_progress: bool = False, @@ -69,7 +66,6 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. file: File to write object payload to. Default: stdout. - generate_key: Generate new private key. header: File to write header to. Default: stdout. no_progress: Do not show progress bar. oid: Object ID. @@ -92,12 +88,11 @@ class FrostfsCliObject(CliCommand): def hash( self, rpc_endpoint: str, + wallet: str, cid: str, oid: str, - wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, - generate_key: Optional[bool] = None, range: Optional[str] = None, salt: Optional[str] = None, ttl: Optional[int] = None, @@ -113,7 +108,6 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. - generate_key: Generate new private key. oid: Object ID. range: Range to take hash from in the form offset1:length1,... rpc_endpoint: Remote node address (as 'multiaddr' or ':'). @@ -130,18 +124,19 @@ class FrostfsCliObject(CliCommand): """ return self._execute( "object hash", - **{param: value for param, value in locals().items() if param not in ["self", "params"]}, + **{ + param: value for param, value in locals().items() if param not in ["self", "params"] + }, ) def head( self, rpc_endpoint: str, + wallet: str, cid: str, oid: str, - wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, - generate_key: Optional[bool] = None, file: Optional[str] = None, json_mode: bool = False, main_only: bool = False, @@ -160,7 +155,6 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. file: File to write object payload to. Default: stdout. - generate_key: Generate new private key. json_mode: Marshal output in JSON. main_only: Return only main fields. oid: Object ID. @@ -184,14 +178,13 @@ class FrostfsCliObject(CliCommand): def lock( self, rpc_endpoint: str, + wallet: str, cid: str, oid: str, - wallet: Optional[str] = None, lifetime: Optional[int] = None, expire_at: Optional[int] = None, address: Optional[str] = None, bearer: Optional[str] = None, - generate_key: Optional[bool] = None, session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -204,7 +197,6 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. - generate_key: Generate new private key. oid: Object ID. lifetime: Lock lifetime. expire_at: Lock expiration epoch. 
@@ -226,14 +218,12 @@ class FrostfsCliObject(CliCommand): def put( self, rpc_endpoint: str, + wallet: str, cid: str, file: str, - wallet: Optional[str] = None, address: Optional[str] = None, attributes: Optional[dict] = None, bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - copies_number: Optional[int] = None, disable_filename: bool = False, disable_timestamp: bool = False, expire_at: Optional[int] = None, @@ -251,13 +241,11 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. attributes: User attributes in form of Key1=Value1,Key2=Value2. bearer: File with signed JSON or binary encoded bearer token. - copies_number: Number of copies of the object to store within the RPC call. cid: Container ID. disable_filename: Do not set well-known filename attribute. disable_timestamp: Do not set well-known timestamp attribute. expire_at: Last epoch in the life of the object. file: File with object payload. - generate_key: Generate new private key. no_progress: Do not show progress bar. notify: Object notification in the form of *epoch*:*topic*; '-' topic means using default. @@ -276,64 +264,15 @@ class FrostfsCliObject(CliCommand): **{param: value for param, value in locals().items() if param not in ["self"]}, ) - def patch( - self, - rpc_endpoint: str, - cid: str, - oid: str, - range: list[str] = None, - payload: list[str] = None, - new_attrs: Optional[str] = None, - replace_attrs: bool = False, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - trace: bool = False, - ttl: Optional[int] = None, - wallet: Optional[str] = None, - xhdr: Optional[dict] = None, - ) -> CommandResult: - """ - PATCH an object. - - Args: - rpc_endpoint: Remote node address (as 'multiaddr' or ':') - cid: Container ID - oid: Object ID - range: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2] - payload: An array of file paths to be applied in each range - new_attrs: Attributes to be changed in the format Key1=Value1,Key2=Value2 - replace_attrs: Replace all attributes completely with new ones specified in new_attrs - address: Address of wallet account - bearer: File with signed JSON or binary encoded bearer token - generate_key: Generate new private key - session: Filepath to a JSON- or binary-encoded token of the object RANGE session - timeout: Timeout for the operation - trace: Generate trace ID and print it - ttl: TTL value in request meta header (default 2) - wallet: WIF (NEP-2) string or path to the wallet or binary key - xhdr: Dict with request X-Headers - - Returns: - Command's result. - """ - return self._execute( - "object patch", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - def range( self, rpc_endpoint: str, + wallet: str, cid: str, oid: str, range: str, - wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, - generate_key: Optional[bool] = None, file: Optional[str] = None, json_mode: bool = False, raw: bool = False, @@ -350,7 +289,6 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. file: File to write object payload to. Default: stdout. - generate_key: Generate new private key. json_mode: Marshal output in JSON. oid: Object ID. range: Range to take data from in the form offset:length. 
@@ -373,11 +311,10 @@ class FrostfsCliObject(CliCommand): def search( self, rpc_endpoint: str, + wallet: str, cid: str, - wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, - generate_key: Optional[bool] = None, filters: Optional[list] = None, oid: Optional[str] = None, phy: bool = False, @@ -395,7 +332,6 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. filters: Repeated filter expressions or files with protobuf JSON. - generate_key: Generate new private key. oid: Object ID. phy: Search physically stored objects. root: Search for user objects. @@ -413,46 +349,3 @@ class FrostfsCliObject(CliCommand): "object search", **{param: value for param, value in locals().items() if param not in ["self"]}, ) - - def nodes( - self, - rpc_endpoint: str, - cid: str, - oid: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Search object nodes. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - generate_key: Generate new private key. - oid: Object ID. - trace: Generate trace ID and print it. - root: Search for user objects. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - verify_presence_all: Verify the actual presence of the object on all netmap nodes. - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). - - Returns: - Command's result. - """ - return self._execute( - "object nodes", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/session.py b/src/frostfs_testlib/cli/frostfs_cli/session.py index 857b13e..e21cc23 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/session.py +++ b/src/frostfs_testlib/cli/frostfs_cli/session.py @@ -9,6 +9,7 @@ class FrostfsCliSession(CliCommand): self, rpc_endpoint: str, wallet: str, + wallet_password: str, out: str, lifetime: Optional[int] = None, address: Optional[str] = None, @@ -29,7 +30,12 @@ class FrostfsCliSession(CliCommand): Returns: Command's result. 
""" - return self._execute( + return self._execute_with_password( "session create", - **{param: value for param, value in locals().items() if param not in ["self"]}, + wallet_password, + **{ + param: value + for param, value in locals().items() + if param not in ["self", "wallet_password"] + }, ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py index 68a2f54..6b47ac2 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -39,10 +39,10 @@ class FrostfsCliShards(CliCommand): def set_mode( self, endpoint: str, + wallet: str, + wallet_password: str, mode: str, - id: Optional[list[str]] = None, - wallet: Optional[str] = None, - wallet_password: Optional[str] = None, + id: Optional[list[str]], address: Optional[str] = None, all: bool = False, clear_errors: bool = False, @@ -65,15 +65,14 @@ class FrostfsCliShards(CliCommand): Returns: Command's result. """ - if not wallet_password: - return self._execute( - "control shards set-mode", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) return self._execute_with_password( "control shards set-mode", wallet_password, - **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, + **{ + param: value + for param, value in locals().items() + if param not in ["self", "wallet_password"] + }, ) def dump( @@ -106,14 +105,18 @@ class FrostfsCliShards(CliCommand): return self._execute_with_password( "control shards dump", wallet_password, - **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, + **{ + param: value + for param, value in locals().items() + if param not in ["self", "wallet_password"] + }, ) def list( self, endpoint: str, - wallet: Optional[str] = None, - wallet_password: Optional[str] = None, + wallet: str, + wallet_password: str, address: Optional[str] = None, json_mode: bool = False, timeout: Optional[str] = None, @@ -132,130 +135,12 @@ class FrostfsCliShards(CliCommand): Returns: Command's result. """ - if not wallet_password: - return self._execute( - "control shards list", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) return self._execute_with_password( "control shards list", wallet_password, - **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, - ) - - def evacuation_start( - self, - endpoint: str, - id: Optional[str] = None, - scope: Optional[str] = None, - all: bool = False, - no_errors: bool = True, - await_mode: bool = False, - address: Optional[str] = None, - timeout: Optional[str] = None, - no_progress: bool = False, - ) -> CommandResult: - """ - Objects evacuation from shard to other shards. - - Args: - address: Address of wallet account - all: Process all shards - await: Block execution until evacuation is completed - endpoint: Remote node control address (as 'multiaddr' or ':') - id: List of shard IDs in base58 encoding - no_errors: Skip invalid/unreadable objects (default true) - no_progress: Print progress if await provided - scope: Evacuation scope; possible values: trees, objects, all (default "all") - timeout: Timeout for an operation (default 15s) - - Returns: - Command's result. 
- """ - return self._execute( - "control shards evacuation start", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def evacuation_reset( - self, - endpoint: str, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Reset evacuate objects from shard to other shards status. - - Args: - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') - timeout: Timeout for an operation (default 15s) - Returns: - Command's result. - """ - return self._execute( - "control shards evacuation reset", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def evacuation_stop( - self, - endpoint: str, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Stop running evacuate process from shard to other shards. - - Args: - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') - timeout: Timeout for an operation (default 15s) - - Returns: - Command's result. - """ - return self._execute( - "control shards evacuation stop", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def evacuation_status( - self, - endpoint: str, - address: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """ - Get evacuate objects from shard to other shards status. - - Args: - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') - timeout: Timeout for an operation (default 15s) - - Returns: - Command's result. - """ - return self._execute( - "control shards evacuation status", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def detach(self, endpoint: str, address: Optional[str] = None, id: Optional[str] = None, timeout: Optional[str] = None): - """ - Detach and close the shards - - Args: - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') - id: List of shard IDs in base58 encoding - timeout: Timeout for an operation (default 15s) - - Returns: - Command's result. - """ - return self._execute( - "control shards detach", - **{param: value for param, value in locals().items() if param not in ["self"]}, + **{ + param: value + for param, value in locals().items() + if param not in ["self", "wallet_password"] + }, ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/tree.py b/src/frostfs_testlib/cli/frostfs_cli/tree.py deleted file mode 100644 index c75b526..0000000 --- a/src/frostfs_testlib/cli/frostfs_cli/tree.py +++ /dev/null @@ -1,53 +0,0 @@ -from typing import Optional - -from frostfs_testlib.cli.cli_command import CliCommand -from frostfs_testlib.shell import CommandResult - - -class FrostfsCliTree(CliCommand): - def healthcheck( - self, - wallet: Optional[str] = None, - rpc_endpoint: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Get internal balance of FrostFS account - - Args: - address: Address of wallet account. - owner: Owner of balance account (omit to use owner from private key). - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - - Returns: - Command's result. 
- - """ - return self._execute( - "tree healthcheck", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) - - def list( - self, - cid: str, - rpc_endpoint: Optional[str] = None, - wallet: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult: - """Get Tree List - - Args: - cid: Container ID. - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - timeout: duration Timeout for the operation (default 15 s) - - Returns: - Command's result. - - """ - return self._execute( - "tree list", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/util.py b/src/frostfs_testlib/cli/frostfs_cli/util.py index 37347a5..99acd0a 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/util.py +++ b/src/frostfs_testlib/cli/frostfs_cli/util.py @@ -6,12 +6,12 @@ from frostfs_testlib.shell import CommandResult class FrostfsCliUtil(CliCommand): def sign_bearer_token( - self, - from_file: str, - to_file: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - json: Optional[bool] = False, + self, + wallet: str, + from_file: str, + to_file: str, + address: Optional[str] = None, + json: Optional[bool] = False, ) -> CommandResult: """ Sign bearer token to use it in requests. @@ -33,9 +33,9 @@ class FrostfsCliUtil(CliCommand): def sign_session_token( self, + wallet: str, from_file: str, to_file: str, - wallet: Optional[str] = None, address: Optional[str] = None, ) -> CommandResult: """ @@ -54,11 +54,3 @@ class FrostfsCliUtil(CliCommand): "util sign session-token", **{param: value for param, value in locals().items() if param not in ["self"]}, ) - - def convert_eacl(self, from_file: str, to_file: str, json: Optional[bool] = False, ape: Optional[bool] = False): - """Convert representation of extended ACL table.""" - - return self._execute( - "util convert eacl", - **{param: value for param, value in locals().items() if param not in ["self"]}, - ) diff --git a/src/frostfs_testlib/cli/generic_cli.py b/src/frostfs_testlib/cli/generic_cli.py deleted file mode 100644 index 2a80159..0000000 --- a/src/frostfs_testlib/cli/generic_cli.py +++ /dev/null @@ -1,30 +0,0 @@ -from typing import Optional - -from frostfs_testlib.hosting.interfaces import Host -from frostfs_testlib.shell.interfaces import CommandOptions, Shell - - -class GenericCli(object): - def __init__(self, cli_name: str, host: Host) -> None: - self.host = host - self.cli_name = cli_name - - def __call__( - self, - args: Optional[str] = "", - pipes: Optional[str] = "", - shell: Optional[Shell] = None, - options: Optional[CommandOptions] = None, - ): - if not shell: - shell = self.host.get_shell() - - cli_config = self.host.get_cli_config(self.cli_name, True) - extra_args = "" - exec_path = self.cli_name - if cli_config: - extra_args = " ".join(cli_config.extra_args) - exec_path = cli_config.exec_path - - cmd = f"{exec_path} {args} {extra_args} {pipes}" - return shell.exec(cmd, options) diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py deleted file mode 100644 index 2c97c3a..0000000 --- a/src/frostfs_testlib/cli/netmap_parser.py +++ /dev/null @@ -1,102 +0,0 @@ -import re - -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeInfo, NodeNetInfo, NodeNetmapInfo, NodeStatus - - -class NetmapParser: - @staticmethod - def 
netinfo(output: str) -> NodeNetInfo: - regexes = { - "epoch": r"Epoch: (?P<epoch>\d+)", - "network_magic": r"Network magic: (?P<network_magic>.*$)", - "time_per_block": r"Time per block: (?P<time_per_block>\d+\w+)", - "container_fee": r"Container fee: (?P<container_fee>\d+)", - "epoch_duration": r"Epoch duration: (?P<epoch_duration>\d+)", - "inner_ring_candidate_fee": r"Inner Ring candidate fee: (?P<inner_ring_candidate_fee>\d+)", - "maximum_object_size": r"Maximum object size: (?P<maximum_object_size>\d+)", - "maximum_count_of_data_shards": r"Maximum count of data shards: (?P<maximum_count_of_data_shards>\d+)", - "maximum_count_of_parity_shards": r"Maximum count of parity shards: (?P<maximum_count_of_parity_shards>\d+)", - "withdrawal_fee": r"Withdrawal fee: (?P<withdrawal_fee>\d+)", - "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?P<homomorphic_hashing_disabled>true|false)", - "maintenance_mode_allowed": r"Maintenance mode allowed: (?P<maintenance_mode_allowed>true|false)", - } - parse_result = {} - - for key, regex in regexes.items(): - search_result = re.search(regex, output, flags=re.MULTILINE) - if search_result == None: - parse_result[key] = None - continue - parse_result[key] = search_result[key].strip() - - node_netinfo = NodeNetInfo(**parse_result) - - return node_netinfo - - @staticmethod - def snapshot_all_nodes(output: str) -> list[NodeNetmapInfo]: - """The code will parse each line and return each node as dataclass.""" - netmap_nodes = output.split("Node ")[1:] - dataclasses_netmap = [] - result_netmap = {} - - regexes = { - "node_id": r"\d+: (?P<node_id>\w+)", - "node_data_ips": r"(?P<node_data_ips>/ip4/.+?)$", - "node_status": r"(?P<node_status>ONLINE|MAINTENANCE|OFFLINE)", - "cluster_name": r"ClusterName: (?P<cluster_name>\w+)", - "continent": r"Continent: (?P<continent>\w+)", - "country": r"Country: (?P<country>\w+)", - "country_code": r"CountryCode: (?P<country_code>\w+)", - "external_address": r"ExternalAddr: (?P<external_address>/ip[4].+?)$", - "location": r"Location: (?P<location>\w+.*)", - "node": r"Node: (?P<node>\d+\.\d+\.\d+\.\d+)", - "price": r"Price: (?P<price>\d+)", - "sub_div": r"SubDiv: (?P<sub_div>.*)", - "sub_div_code": r"SubDivCode: (?P<sub_div_code>\w+)", - "un_locode": r"UN-LOCODE: (?P<un_locode>\w+.*)", - "role": r"role: (?P<role>\w+)", - } - - for node in netmap_nodes: - for key, regex in regexes.items(): - search_result = re.search(regex, node, flags=re.MULTILINE) - if search_result is None: - result_netmap[key] = None - continue - if key == "node_data_ips": - result_netmap[key] = search_result[key].strip().split(" ") - continue - if key == "external_address": - result_netmap[key] = search_result[key].strip().split(",") - continue - if key == "node_status": - result_netmap[key] = NodeStatus(search_result[key].strip().lower()) - continue - result_netmap[key] = search_result[key].strip() - - dataclasses_netmap.append(NodeNetmapInfo(**result_netmap)) - - return dataclasses_netmap - - @staticmethod - def snapshot_one_node(output: str, rpc_endpoint: str) -> NodeNetmapInfo | None: - snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output) - for snapshot in snapshot_nodes: - for endpoint in snapshot.external_address: - if rpc_endpoint.split(":")[0] in endpoint: - return snapshot - - @staticmethod - def node_info(output: dict) -> NodeNetmapInfo: - data_dict = {"attributes": {}} - - for key, value in output.items(): - if key != "attributes": - data_dict[key] = value - - for attribute in output["attributes"]: - data_dict["attributes"][attribute["key"]] = attribute["value"] - - return NodeInfo(**data_dict) diff --git a/src/frostfs_testlib/clients/__init__.py b/src/frostfs_testlib/clients/__init__.py deleted file mode 100644 index e46766b..0000000 --- a/src/frostfs_testlib/clients/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from frostfs_testlib.clients.http.http_client import HttpClient -from frostfs_testlib.clients.s3.aws_cli_client import 
AwsCliClient -from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper -from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper -from frostfs_testlib.clients.s3.s3_http_client import S3HttpClient diff --git a/src/frostfs_testlib/clients/http/__init__.py b/src/frostfs_testlib/clients/http/__init__.py deleted file mode 100644 index ab6e2b0..0000000 --- a/src/frostfs_testlib/clients/http/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from frostfs_testlib.clients.http.http_client import HttpClient diff --git a/src/frostfs_testlib/clients/http/http_client.py b/src/frostfs_testlib/clients/http/http_client.py deleted file mode 100644 index aebd5ef..0000000 --- a/src/frostfs_testlib/clients/http/http_client.py +++ /dev/null @@ -1,145 +0,0 @@ -import io -import json -import logging -import logging.config -from typing import Mapping, Sequence - -import httpx - -from frostfs_testlib import reporter - -timeout = httpx.Timeout(60, read=150) -LOGGING_CONFIG = { - "disable_existing_loggers": False, - "version": 1, - "handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}}, - "formatters": { - "http": { - "format": "%(levelname)s [%(asctime)s] %(name)s - %(message)s", - "datefmt": "%Y-%m-%d %H:%M:%S", - } - }, - "loggers": { - "httpx": { - "handlers": ["default"], - "level": "DEBUG", - }, - "httpcore": { - "handlers": ["default"], - "level": "ERROR", - }, - }, -} - -logging.config.dictConfig(LOGGING_CONFIG) -logger = logging.getLogger("NeoLogger") - - -class HttpClient: - @reporter.step("Send {method} request to {url}") - def send(self, method: str, url: str, expected_status_code: int = None, **kwargs: dict) -> httpx.Response: - transport = httpx.HTTPTransport(verify=False, retries=5) - client = httpx.Client(timeout=timeout, transport=transport) - response = client.request(method, url, **kwargs) - - self._attach_response(response, **kwargs) - logger.info(f"Response: {response.status_code} => {response.text}") - - if expected_status_code: - assert ( - response.status_code == expected_status_code - ), f"Got {response.status_code} response code while {expected_status_code} expected" - - return response - - @classmethod - def _parse_body(cls, readable: httpx.Request | httpx.Response) -> str | None: - try: - content = readable.read() - except Exception as e: - logger.warning(f"Unable to read file: {str(e)}") - return None - - if not content: - return None - - request_body = None - - try: - request_body = json.loads(content) - except (json.JSONDecodeError, UnicodeDecodeError) as e: - logger.warning(f"Unable to convert body to json: {str(e)}") - - if request_body is not None: - return json.dumps(request_body, default=str, indent=4) - - try: - request_body = content.decode() - except UnicodeDecodeError as e: - logger.warning(f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}") - - request_body = content if request_body is None else request_body - request_body = "" if len(request_body) > 1000 else request_body - - return request_body - - @classmethod - def _parse_files(cls, files: Mapping | Sequence | None) -> dict: - filepaths = {} - - if not files: - return filepaths - - if isinstance(files, Sequence): - items = files - elif isinstance(files, Mapping): - items = files.items() - else: - raise TypeError(f"'files' must be either Sequence or Mapping, got: {type(files).__name__}") - - for name, file in items: - if isinstance(file, io.IOBase): - filepaths[name] = file.name - elif isinstance(file, Sequence): - filepaths[name] = 
file[1].name - - return filepaths - - @classmethod - def _attach_response(cls, response: httpx.Response, **kwargs): - request = response.request - request_headers = json.dumps(dict(request.headers), default=str, indent=4) - request_body = cls._parse_body(request) - - files = kwargs.get("files") - request_files = cls._parse_files(files) - - response_headers = json.dumps(dict(response.headers), default=str, indent=4) - response_body = cls._parse_body(response) - - report = ( - f"Method: {request.method}\n\n" - + f"URL: {request.url}\n\n" - + f"Request Headers: {request_headers}\n\n" - + (f"Request Body: {request_body}\n\n" if request_body else "") - + (f"Request Files: {request_files}\n\n" if request_files else "") - + f"Response Status Code: {response.status_code}\n\n" - + f"Response Headers: {response_headers}\n\n" - + (f"Response Body: {response_body}\n\n" if response_body else "") - ) - curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body, request_files) - - reporter.attach(report, "Requests Info") - reporter.attach(curl_request, "CURL") - - @classmethod - def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str: - excluded_headers = {"Accept-Encoding", "Connection", "User-Agent", "Content-Length"} - headers = " ".join(f"-H '{header.title()}: {value}'" for header, value in headers.items() if header.title() not in excluded_headers) - - data = f" -d '{data}'" if data else "" - for name, path in files.items(): - data += f' -F "{name}=@{path}"' - - # Option -k means no verify SSL - return f"curl {url} -X {method} {headers}{data} -k" diff --git a/src/frostfs_testlib/clients/s3/__init__.py b/src/frostfs_testlib/clients/s3/__init__.py deleted file mode 100644 index 5481f48..0000000 --- a/src/frostfs_testlib/clients/s3/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient -from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper -from frostfs_testlib.clients.s3.interfaces import ACL, BucketContainerResolver, S3ClientWrapper, VersioningStatus diff --git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py deleted file mode 100644 index a2e3fc7..0000000 --- a/src/frostfs_testlib/clients/s3/aws_cli_client.py +++ /dev/null @@ -1,1539 +0,0 @@ -import json -import logging -import os -from datetime import datetime -from time import sleep -from typing import Literal, Optional, Union - -from frostfs_testlib import reporter -from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict -from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME -from frostfs_testlib.shell import CommandOptions -from frostfs_testlib.shell.local_shell import LocalShell -from frostfs_testlib.utils import string_utils - -# TODO: Refactor this code to use shell instead of _cmd_run -from frostfs_testlib.utils.cli_utils import _configure_aws_cli -from frostfs_testlib.utils.file_utils import TestFile - -logger = logging.getLogger("NeoLogger") -command_options = CommandOptions(timeout=480) - - -class AwsCliClient(S3ClientWrapper): - __repr_name__: str = "AWS CLI" - - # Flags that we use for all S3 commands: disable SSL verification (as we use self-signed - # certificate in devenv) and disable automatic pagination in CLI output - common_flags = "--no-verify-ssl --no-paginate" - s3gate_endpoint: str - - @reporter.step("Configure S3 
client (aws cli)") - def __init__( - self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" - ) -> None: - self.s3gate_endpoint = s3gate_endpoint - self.iam_endpoint = None - - self.access_key_id: str = access_key_id - self.secret_access_key: str = secret_access_key - self.profile = profile - self.region = region - - self.local_shell = LocalShell() - try: - _configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key, region) - self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}") - self.local_shell.exec( - f"aws configure set retry_mode {RETRY_MODE} --profile {profile}", - ) - except Exception as err: - raise RuntimeError("Error while configuring AwsCliClient") from err - - @reporter.step("Set S3 endpoint to {s3gate_endpoint}") - def set_endpoint(self, s3gate_endpoint: str): - self.s3gate_endpoint = s3gate_endpoint - - @reporter.step("Set IAM endpoint to {iam_endpoint}") - def set_iam_endpoint(self, iam_endpoint: str): - self.iam_endpoint = iam_endpoint - - @reporter.step("Create bucket S3") - def create_bucket( - self, - bucket: Optional[str] = None, - object_lock_enabled_for_bucket: Optional[bool] = None, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - grant_full_control: Optional[str] = None, - location_constraint: Optional[str] = None, - ) -> str: - if bucket is None: - bucket = string_utils.unique_name("bucket-") - - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - if object_lock_enabled_for_bucket is None: - object_lock = "" - elif object_lock_enabled_for_bucket: - object_lock = " --object-lock-enabled-for-bucket" - else: - object_lock = " --no-object-lock-enabled-for-bucket" - cmd = ( - f"aws {self.common_flags} s3api create-bucket --bucket {bucket} " - f"{object_lock} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if acl: - cmd += f" --acl {acl}" - if grant_full_control: - cmd += f" --grant-full-control {grant_full_control}" - if grant_write: - cmd += f" --grant-write {grant_write}" - if grant_read: - cmd += f" --grant-read {grant_read}" - if location_constraint: - cmd += f" --create-bucket-configuration LocationConstraint={location_constraint}" - self.local_shell.exec(cmd) - - return bucket - - @reporter.step("List buckets S3") - def list_buckets(self) -> list[str]: - cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint} --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - buckets_json = self._to_json(output) - return [bucket["Name"] for bucket in buckets_json["Buckets"]] - - @reporter.step("Delete bucket S3") - def delete_bucket(self, bucket: str) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - self.local_shell.exec(cmd, command_options) - - @reporter.step("Head bucket S3") - def head_bucket(self, bucket: str) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - self.local_shell.exec(cmd) - - @reporter.step("Put bucket versioning status") - def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: - if bucket.startswith("-") or " " in bucket: - 
bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} " - f"--versioning-configuration Status={status.value} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Get bucket versioning status") - def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Status") - - @reporter.step("Put bucket tagging") - def put_bucket_tagging(self, bucket: str, tags: list) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]} - cmd = ( - f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} " - f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Get bucket tagging") - def get_bucket_tagging(self, bucket: str) -> list: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("TagSet") - - @reporter.step("Get bucket acl") - def get_bucket_acl(self, bucket: str) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - return self._to_json(output) - - @reporter.step("Get bucket location") - def get_bucket_location(self, bucket: str) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("LocationConstraint") - - @reporter.step("List objects S3") - def list_objects( - self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None - ) -> Union[dict, list[str]]: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} " - if page_size: - cmd = cmd.replace("--no-paginate", "") - cmd += f" --page-size {page_size} " - if prefix: - cmd += f" --prefix {prefix}" - if self.profile: - cmd += f" --profile {self.profile} " - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - obj_list = [obj["Key"] for obj in response.get("Contents", [])] - logger.info(f"Found s3 objects: {obj_list}") - - return response if full_output else obj_list - - @reporter.step("List objects S3 v2") - def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws 
{self.common_flags} s3api list-objects-v2 --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - obj_list = [obj["Key"] for obj in response.get("Contents", [])] - logger.info(f"Found s3 objects: {obj_list}") - - return response if full_output else obj_list - - @reporter.step("List objects versions S3") - def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response if full_output else response.get("Versions", []) - - @reporter.step("List objects delete markers S3") - def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response if full_output else response.get("DeleteMarkers", []) - - @reporter.step("Copy object S3") - def copy_object( - self, - source_bucket: str, - source_key: str, - bucket: Optional[str] = None, - key: Optional[str] = None, - acl: Optional[str] = None, - metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None, - metadata: Optional[dict] = None, - tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None, - tagging: Optional[str] = None, - ) -> str: - if bucket is None: - bucket = source_bucket - - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - if key is None: - key = string_utils.unique_name("copy-object-") - - copy_source = f"{source_bucket}/{source_key}" - - cmd = ( - f"aws {self.common_flags} s3api copy-object --copy-source {copy_source} " - f"--bucket {bucket} --key {key} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if acl: - cmd += f" --acl {acl}" - if metadata_directive: - cmd += f" --metadata-directive {metadata_directive}" - if metadata: - cmd += " --metadata " - for meta_key, value in metadata.items(): - cmd += f" {meta_key}={value}" - if tagging_directive: - cmd += f" --tagging-directive {tagging_directive}" - if tagging: - cmd += f" --tagging {tagging}" - self.local_shell.exec(cmd, command_options) - return key - - @reporter.step("Put object S3") - def put_object( - self, - bucket: str, - filepath: str, - key: Optional[str] = None, - metadata: Optional[dict] = None, - tagging: Optional[str] = None, - acl: Optional[str] = None, - object_lock_mode: Optional[str] = None, - object_lock_retain_until_date: Optional[datetime] = None, - object_lock_legal_hold_status: Optional[str] = None, - grant_full_control: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> str: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - if key is None: - key = os.path.basename(filepath) - - cmd = ( - f"aws {self.common_flags} s3api put-object --bucket {bucket} --key {key} " - f"--body {filepath} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if metadata: - cmd += " --metadata" - for key, value in metadata.items(): - cmd += f" {key}={value}" - if tagging: - cmd += f" --tagging '{tagging}'" 
- if acl: - cmd += f" --acl {acl}" - if object_lock_mode: - cmd += f" --object-lock-mode {object_lock_mode}" - if object_lock_retain_until_date: - cmd += f' --object-lock-retain-until-date "{object_lock_retain_until_date}"' - if object_lock_legal_hold_status: - cmd += f" --object-lock-legal-hold-status {object_lock_legal_hold_status}" - if grant_full_control: - cmd += f" --grant-full-control '{grant_full_control}'" - if grant_read: - cmd += f" --grant-read {grant_read}" - output = self.local_shell.exec(cmd, command_options).stdout - response = self._to_json(output) - return response.get("VersionId") - - @reporter.step("Head object S3") - def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} " - f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response - - @reporter.step("Get object S3") - def get_object( - self, - bucket: str, - key: str, - version_id: Optional[str] = None, - object_range: Optional[tuple[int, int]] = None, - full_output: bool = False, - ) -> dict | TestFile: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-"))) - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} " - f"{version} {test_file} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if object_range: - cmd += f" --range bytes={object_range[0]}-{object_range[1]}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response if full_output else test_file - - @reporter.step("Get object ACL") - def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} " - f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Grants") - - @reporter.step("Put object ACL") - def put_object_acl( - self, - bucket: str, - key: str, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> list: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api put-object-acl --bucket {bucket} --key {key} " - f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if acl: - cmd += f" --acl {acl}" - if grant_write: - cmd += f" --grant-write {grant_write}" - if grant_read: - cmd += f" --grant-read {grant_read}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Grants") - - @reporter.step("Put bucket ACL") - def put_bucket_acl( - self, - bucket: str, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api 
put-bucket-acl --bucket {bucket} " - f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if acl: - cmd += f" --acl {acl}" - if grant_write: - cmd += f" --grant-write {grant_write}" - if grant_read: - cmd += f" --grant-read {grant_read}" - self.local_shell.exec(cmd) - - @reporter.step("Delete objects S3") - def delete_objects(self, bucket: str, keys: list[str]) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") - delete_structure = json.dumps(_make_objs_dict(keys)) - with open(file_path, "w") as out_file: - out_file.write(delete_structure) - logger.info(f"Input file for delete-objects: {delete_structure}") - - cmd = ( - f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " - f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd, command_options).stdout - response = self._to_json(output) - return response - - @reporter.step("Delete object S3") - def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api delete-object --bucket {bucket} " - f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd, command_options).stdout - return self._to_json(output) - - @reporter.step("Delete object versions S3") - def delete_object_versions(self, bucket: str, object_versions: list) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - # Build deletion list in S3 format - delete_list = { - "Objects": [ - { - "Key": object_version["Key"], - "VersionId": object_version["VersionId"], - } - for object_version in object_versions - ] - } - - file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") - delete_structure = json.dumps(delete_list) - with open(file_path, "w") as out_file: - out_file.write(delete_structure) - logger.info(f"Input file for delete-objects: {delete_structure}") - - cmd = ( - f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " - f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd, command_options).stdout - return self._to_json(output) - - @reporter.step("Delete object versions S3 without delete markers") - def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - # Delete objects without creating delete markers - for object_version in object_versions: - self.delete_object(bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"]) - - @reporter.step("Get object attributes") - def get_object_attributes( - self, - bucket: str, - key: str, - attributes: list[str], - version_id: str = "", - max_parts: int = 0, - part_number: int = 0, - full_output: bool = True, - ) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - attrs = ",".join(attributes) - version = f" --version-id {version_id}" if version_id else "" - parts = f"--max-parts {max_parts}" if max_parts else "" - part_number_str = f"--part-number-marker {part_number}" if part_number else "" - cmd = ( - f"aws {self.common_flags} s3api get-object-attributes --bucket {bucket} " - 
f"--key {key} {version} {parts} {part_number_str} --object-attributes {attrs} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - for attr in attributes: - assert attr in response, f"Expected attribute {attr} in {response}" - - if full_output: - return response - else: - return response.get(attributes[0]) - - @reporter.step("Get bucket policy") - def get_bucket_policy(self, bucket: str) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Policy") - - @reporter.step("Delete bucket policy") - def delete_bucket_policy(self, bucket: str) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api delete-bucket-policy --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response - - @reporter.step("Put bucket policy") - def put_bucket_policy(self, bucket: str, policy: dict) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - # Leaving it as is was in test repo. Double dumps to escape resulting string - # Example: - # policy = {"a": 1} - # json.dumps(policy) => {"a": 1} - # json.dumps(json.dumps(policy)) => "{\"a\": 1}" - # TODO: update this - dumped_policy = json.dumps(json.dumps(policy)) - cmd = ( - f"aws {self.common_flags} s3api put-bucket-policy --bucket {bucket} " - f"--policy {dumped_policy} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Get bucket cors") - def get_bucket_cors(self, bucket: str) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("CORSRules") - - @reporter.step("Put bucket cors") - def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " - f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Delete bucket cors") - def delete_bucket_cors(self, bucket: str) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Delete bucket tagging") - def delete_bucket_tagging(self, bucket: str) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Put object retention") - def put_object_retention( - self, - bucket: str, - 
key: str, - retention: dict, - version_id: Optional[str] = None, - bypass_governance_retention: Optional[bool] = None, - ) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api put-object-retention --bucket {bucket} --key {key} " - f"{version} --retention '{json.dumps(retention, indent=4, sort_keys=True, default=str)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - if bypass_governance_retention is not None: - cmd += " --bypass-governance-retention" - self.local_shell.exec(cmd) - - @reporter.step("Put object legal hold") - def put_object_legal_hold( - self, - bucket: str, - key: str, - legal_hold_status: Literal["ON", "OFF"], - version_id: Optional[str] = None, - ) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - version = f" --version-id {version_id}" if version_id else "" - legal_hold = json.dumps({"Status": legal_hold_status}) - cmd = ( - f"aws {self.common_flags} s3api put-object-legal-hold --bucket {bucket} --key {key} " - f"{version} --legal-hold '{legal_hold}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Put object tagging") - def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - tagging = {"TagSet": tags} - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} " - f"{version} --tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Get object tagging") - def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key {key} " - f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("TagSet") - - @reporter.step("Delete object tagging") - def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - version = f" --version-id {version_id}" if version_id else "" - cmd = ( - f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " - f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Sync directory S3") - def sync( - self, - bucket: str, - dir_path: str, - acl: Optional[str] = None, - metadata: Optional[dict] = None, - ) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - if metadata: - cmd += " --metadata" - for key, value in metadata.items(): - cmd += f" {key}={value}" - if acl: - cmd += f" --acl {acl}" - output = self.local_shell.exec(cmd, command_options).stdout - return 
self._to_json(output) - - @reporter.step("CP directory S3") - def cp( - self, - bucket: str, - dir_path: str, - acl: Optional[str] = None, - metadata: Optional[dict] = None, - ) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket} " - f"--endpoint-url {self.s3gate_endpoint} --recursive --profile {self.profile}" - ) - if metadata: - cmd += " --metadata" - for key, value in metadata.items(): - cmd += f" {key}={value}" - if acl: - cmd += f" --acl {acl}" - output = self.local_shell.exec(cmd, command_options).stdout - return self._to_json(output) - - @reporter.step("Create multipart upload S3") - def create_multipart_upload(self, bucket: str, key: str) -> str: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " - f"--key {key} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" - - return response["UploadId"] - - @reporter.step("List multipart uploads S3") - def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Uploads") - - @reporter.step("Abort multipart upload S3") - def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} " - f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - self.local_shell.exec(cmd) - - @reporter.step("Upload part S3") - def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} " - f"--upload-id {upload_id} --part-number {part_num} --body {filepath} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd, command_options).stdout - response = self._to_json(output) - assert response.get("ETag"), f"Expected ETag in response:\n{response}" - return response["ETag"] - - @reporter.step("Upload copy part S3") - def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} " - f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd, command_options).stdout - response = self._to_json(output) - assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" - - return response["CopyPartResult"]["ETag"] - - @reporter.step("List parts S3") - def list_parts(self, bucket: str, key: str, upload_id: str) -> 
list[dict]: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " - f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("Parts"), f"Expected Parts in response:\n{response}" - - return response["Parts"] - - @reporter.step("Complete multipart upload S3") - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - file_path = os.path.join(os.getcwd(), ASSETS_DIR, "parts.json") - parts_dict = {"Parts": [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]} - - with open(file_path, "w") as out_file: - out_file.write(json.dumps(parts_dict)) - - logger.info(f"Input file for complete-multipart-upload: {json.dumps(parts_dict)}") - - cmd = ( - f"aws {self.common_flags} s3api complete-multipart-upload --bucket {bucket} " - f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Put object lock configuration") - def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " - f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - return self._to_json(output) - - @reporter.step("Get object lock configuration") - def get_object_lock_configuration(self, bucket: str): - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("ObjectLockConfiguration") - - @reporter.step("Put bucket lifecycle configuration") - def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api put-bucket-lifecycle-configuration --bucket {bucket} " - f"--endpoint-url {self.s3gate_endpoint} --lifecycle-configuration file://{dumped_configuration} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response - - @reporter.step("Get bucket lifecycle configuration") - def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: - if bucket.startswith("-") or " " in bucket: - bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api get-bucket-lifecycle-configuration --bucket {bucket} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response - - @reporter.step("Delete bucket lifecycle configuration") - def delete_bucket_lifecycle(self, bucket: str) -> dict: - if bucket.startswith("-") or " " in bucket: - 
bucket = f'"{bucket}"' - - cmd = ( - f"aws {self.common_flags} s3api delete-bucket-lifecycle --bucket {bucket} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" - ) - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response - - @staticmethod - def _to_json(output: str) -> dict: - json_output = {} - if "{" not in output and "}" not in output: - logger.warning(f"Could not parse json from output {output}") - return json_output - - json_output = json.loads(output[output.index("{") :]) - - return json_output - - # IAM METHODS # - # Some methods don't have checks because AWS is silent in some cases (delete, attach, etc.) - - @reporter.step("Adds the specified user to the specified group") - def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: - cmd = f"aws {self.common_flags} iam add-user-to-group --user-name {user_name} --group-name {group_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Attaches the specified managed policy to the specified IAM group") - def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: - cmd = f"aws {self.common_flags} iam attach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) - - return response - - @reporter.step("Attaches the specified managed policy to the specified user") - def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: - cmd = f"aws {self.common_flags} iam attach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) - - return response - - @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") - def iam_create_access_key(self, user_name: Optional[str] = None) -> dict: - cmd = f"aws {self.common_flags} iam create-access-key --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - if user_name: - cmd += f" --user-name {user_name}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - access_key_id = response["AccessKey"].get("AccessKeyId") - secret_access_key = response["AccessKey"].get("SecretAccessKey") - assert access_key_id, f"Expected AccessKeyId in response:\n{response}" - assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" - - return access_key_id, secret_access_key - - @reporter.step("Creates a new group") - def iam_create_group(self, group_name: str) -> dict: - cmd = f"aws {self.common_flags} iam create-group --group-name {group_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("Group"), f"Expected Group in response:\n{response}" - assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" - - return response - - @reporter.step("Creates a new managed policy for your AWS account") - def iam_create_policy(self, policy_name: str, 
policy_document: dict) -> dict: - cmd = ( - f"aws {self.common_flags} iam create-policy --endpoint {self.iam_endpoint}" - f" --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" - ) - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("Policy"), f"Expected Policy in response:\n{response}" - assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" - - return response - - @reporter.step("Creates a new IAM user for your AWS account") - def iam_create_user(self, user_name: str) -> dict: - cmd = f"aws {self.common_flags} iam create-user --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("User"), f"Expected User in response:\n{response}" - assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" - - return response - - @reporter.step("Deletes the access key pair associated with the specified IAM user") - def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: - cmd = f"aws {self.common_flags} iam delete-access-key --access-key-id {access_key_id} --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Deletes the specified IAM group") - def iam_delete_group(self, group_name: str) -> dict: - cmd = f"aws {self.common_flags} iam delete-group --group-name {group_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") - def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: - cmd = f"aws {self.common_flags} iam delete-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Deletes the specified managed policy") - def iam_delete_policy(self, policy_arn: str) -> dict: - cmd = f"aws {self.common_flags} iam delete-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Deletes the specified IAM user") - def iam_delete_user(self, user_name: str) -> dict: - cmd = f"aws {self.common_flags} iam delete-user --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") - def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: - cmd = f"aws {self.common_flags} iam delete-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile 
{self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Removes the specified managed policy from the specified IAM group") - def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: - cmd = f"aws {self.common_flags} iam detach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) - - return response - - @reporter.step("Removes the specified managed policy from the specified user") - def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: - cmd = f"aws {self.common_flags} iam detach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) - - return response - - @reporter.step("Returns a list of IAM users that are in the specified IAM group") - def iam_get_group(self, group_name: str) -> dict: - cmd = f"aws {self.common_flags} iam get-group --group-name {group_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert "Users" in response.keys(), f"Expected Users in response:\n{response}" - assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}" - - return response - - @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") - def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: - cmd = f"aws {self.common_flags} iam get-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Retrieves information about the specified managed policy") - def iam_get_policy(self, policy_arn: str) -> dict: - cmd = f"aws {self.common_flags} iam get-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("Policy"), f"Expected Policy in response:\n{response}" - assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" - - return response - - @reporter.step("Retrieves information about the specified version of the specified managed policy") - def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: - cmd = f"aws {self.common_flags} iam get-policy-version --policy-arn {policy_arn} --version-id {version_id} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}" - assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}" - - return response - - @reporter.step("Retrieves information about the specified IAM user") - def iam_get_user(self, user_name: str) 
-> dict: - cmd = f"aws {self.common_flags} iam get-user --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("User"), f"Expected User in response:\n{response}" - assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" - - return response - - @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") - def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: - cmd = f"aws {self.common_flags} iam get-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("UserName"), f"Expected User in response:\n{response}" - - return response - - @reporter.step("Returns information about the access key IDs associated with the specified IAM user") - def iam_list_access_keys(self, user_name: str) -> dict: - cmd = f"aws {self.common_flags} iam list-access-keys --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Lists all managed policies that are attached to the specified IAM group") - def iam_list_attached_group_policies(self, group_name: str) -> dict: - cmd = f"aws {self.common_flags} iam list-attached-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" - - return response - - @reporter.step("Lists all managed policies that are attached to the specified IAM user") - def iam_list_attached_user_policies(self, user_name: str) -> dict: - cmd = f"aws {self.common_flags} iam list-attached-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" - - return response - - @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") - def iam_list_entities_for_policy(self, policy_arn: str) -> dict: - cmd = f"aws {self.common_flags} iam list-entities-for-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" - assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" - - return response - - @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") - def iam_list_group_policies(self, group_name: str) -> dict: - cmd = f"aws {self.common_flags} iam list-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - 
response = self._to_json(output) - - assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" - - return response - - @reporter.step("Lists the IAM groups") - def iam_list_groups(self) -> dict: - cmd = f"aws {self.common_flags} iam list-groups --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" - - return response - - @reporter.step("Lists the IAM groups that the specified IAM user belongs to") - def iam_list_groups_for_user(self, user_name: str) -> dict: - cmd = f"aws {self.common_flags} iam list-groups-for-user --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" - - return response - - @reporter.step("Lists all the managed policies that are available in your AWS account") - def iam_list_policies(self) -> dict: - cmd = f"aws {self.common_flags} iam list-policies --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert "Policies" in response.keys(), f"Expected Policies in response:\n{response}" - - return response - - @reporter.step("Lists information about the versions of the specified managed policy") - def iam_list_policy_versions(self, policy_arn: str) -> dict: - cmd = f"aws {self.common_flags} iam list-policy-versions --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert response.get("Versions"), f"Expected Versions in response:\n{response}" - - return response - - @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") - def iam_list_user_policies(self, user_name: str) -> dict: - cmd = f"aws {self.common_flags} iam list-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" - - return response - - @reporter.step("Lists the IAM users") - def iam_list_users(self) -> dict: - cmd = f"aws {self.common_flags} iam list-users --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - assert "Users" in response.keys(), f"Expected Users in response:\n{response}" - - return response - - @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") - def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: - cmd = ( - f"aws {self.common_flags} iam put-group-policy --endpoint {self.iam_endpoint}" - f" --group-name {group_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" - ) - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) - - return response - - 
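A usage sketch of the IAM helpers above, assuming `client` is an already-configured instance of this CLI wrapper with a reachable IAM endpoint; the group, user, and policy names below are made up for illustration.

# Hypothetical wiring: `client` is assumed to be a configured instance of the wrapper above.
group = "qa-group"
user = "qa-user"

client.iam_create_group(group)
client.iam_create_user(user)
client.iam_add_user_to_group(user_name=user, group_name=group)

# Inline policy document; iam_put_group_policy json.dumps-es it into the CLI call itself.
policy_document = {
    "Version": "2012-10-17",
    "Statement": [{"Effect": "Allow", "Action": "s3:ListBucket", "Resource": "*"}],
}
client.iam_put_group_policy(group, "allow-list-bucket", policy_document)

# The get/list helpers assert on the relevant keys, so these lookups can be chained directly.
assert user in [u["UserName"] for u in client.iam_get_group(group)["Users"]]
assert "allow-list-bucket" in client.iam_list_group_policies(group)["PolicyNames"]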
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") - def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: - cmd = ( - f"aws {self.common_flags} iam put-user-policy --endpoint {self.iam_endpoint}" - f" --user-name {user_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" - ) - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 14) - - return response - - @reporter.step("Removes the specified user from the specified group") - def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam remove-user-from-group --endpoint {self.iam_endpoint}" - f" --group-name {group_name} --user-name {user_name}" - ) - if self.profile: - cmd += f" --profile {self.profile}" - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Updates the name and/or the path of the specified IAM group") - def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: - cmd = f"aws {self.common_flags} iam update-group --group-name {group_name} --endpoint {self.iam_endpoint}" - if new_name: - cmd += f" --new-group-name {new_name}" - if new_path: - cmd += f" --new-path {new_path}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Updates the name and/or the path of the specified IAM user") - def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: - cmd = f"aws {self.common_flags} iam update-user --user-name {user_name} --endpoint {self.iam_endpoint}" - if new_name: - cmd += f" --new-user-name {new_name}" - if new_path: - cmd += f" --new-path {new_path}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Adds one or more tags to an IAM user") - def iam_tag_user(self, user_name: str, tags: list) -> dict: - tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - cmd = ( - f"aws {self.common_flags} iam tag-user --user-name {user_name} --tags '{json.dumps(tags_json)}' --endpoint {self.iam_endpoint}" - ) - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("List tags of IAM user") - def iam_list_user_tags(self, user_name: str) -> dict: - cmd = f"aws {self.common_flags} iam list-user-tags --user-name {user_name} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Removes the specified tags from the user") - def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: - tag_keys_joined = " ".join(tag_keys) - cmd = f"aws {self.common_flags} iam untag-user --user-name {user_name} --tag-keys {tag_keys_joined} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - # MFA 
METHODS - @reporter.step("Creates a new virtual MFA device") - def iam_create_virtual_mfa_device(self, virtual_mfa_device_name: str, outfile: str, bootstrap_method: str) -> tuple: - cmd = f"aws {self.common_flags} iam create-virtual-mfa-device --virtual-mfa-device-name {virtual_mfa_device_name}\ - --outfile {outfile} --bootstrap-method {bootstrap_method} --endpoint {self.iam_endpoint}" - - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber") - assert serial_number, f"Expected SerialNumber in response:\n{response}" - - return serial_number, False - - @reporter.step("Deactivates the specified MFA device and removes it from association with the user name") - def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: - cmd = f"aws {self.common_flags} iam deactivate-mfa-device --user-name {user_name} --serial-number {serial_number} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Deletes a virtual MFA device") - def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: - cmd = f"aws {self.common_flags} iam delete-virtual-mfa-device --serial-number {serial_number} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Enables the specified MFA device and associates it with the specified IAM user") - def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: - cmd = f"aws {self.common_flags} iam enable-mfa-device --user-name {user_name} --serial-number {serial_number} --authentication-code1 {authentication_code1}\ - --authentication-code2 {authentication_code2} --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - - return response - - @reporter.step("Lists the MFA devices for an IAM user") - def iam_list_virtual_mfa_devices(self) -> dict: - cmd = f"aws {self.common_flags} iam list-virtual-mfa-devices --endpoint {self.iam_endpoint}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}" - - return response - - @reporter.step("Get session token for user") - def sts_get_session_token( - self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None - ) -> tuple: - cmd = f"aws {self.common_flags} sts get-session-token --endpoint {self.iam_endpoint}" - if duration_seconds: - cmd += f" --duration-seconds {duration_seconds}" - if serial_number: - cmd += f" --serial-number {serial_number}" - if token_code: - cmd += f" --token-code {token_code}" - if self.profile: - cmd += f" --profile {self.profile}" - - output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - access_key = response.get("Credentials", {}).get("AccessKeyId") - secret_access_key = response.get("Credentials", {}).get("SecretAccessKey") - session_token = response.get("Credentials", 
{}).get("SessionToken") - assert access_key, f"Expected AccessKeyId in response:\n{response}" - assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" - assert session_token, f"Expected SessionToken in response:\n{response}" - - return access_key, secret_access_key, session_token diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py deleted file mode 100644 index 4157bd6..0000000 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ /dev/null @@ -1,1343 +0,0 @@ -import json -import logging -import os -from collections.abc import Callable -from datetime import datetime -from time import sleep -from typing import Literal, Optional, Union - -import boto3 -import urllib3 -from botocore.config import Config -from botocore.exceptions import ClientError -from mypy_boto3_s3 import S3Client - -from frostfs_testlib import reporter -from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict -from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME -from frostfs_testlib.utils import string_utils - -# TODO: Refactor this code to use shell instead of _cmd_run -from frostfs_testlib.utils.cli_utils import log_command_execution -from frostfs_testlib.utils.file_utils import TestFile - -logger = logging.getLogger("NeoLogger") - -# Disable warnings on self-signed certificate which the -# boto library produces on requests to S3-gate in dev-env -urllib3.disable_warnings() - - -class Boto3ClientWrapper(S3ClientWrapper): - __repr_name__: str = "Boto3 client" - - @reporter.step("Configure S3 client (boto3)") - def __init__( - self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" - ) -> None: - self.s3gate_endpoint: str = "" - self.boto3_client: S3Client = None - - self.iam_endpoint: str = "" - self.boto3_iam_client: S3Client = None - self.boto3_sts_client: S3Client = None - - self.access_key_id = access_key_id - self.secret_access_key = secret_access_key - self.profile = profile - self.region = region - - self.session = boto3.Session() - self.config = Config(retries={"max_attempts": MAX_REQUEST_ATTEMPTS, "mode": RETRY_MODE}) - - self.set_endpoint(s3gate_endpoint) - - @reporter.step("Set endpoint S3 to {s3gate_endpoint}") - def set_endpoint(self, s3gate_endpoint: str): - if self.s3gate_endpoint == s3gate_endpoint: - return - - self.s3gate_endpoint = s3gate_endpoint - - self.boto3_client: S3Client = self.session.client( - service_name="s3", - aws_access_key_id=self.access_key_id, - aws_secret_access_key=self.secret_access_key, - region_name=self.region, - config=self.config, - endpoint_url=s3gate_endpoint, - verify=False, - ) - - @reporter.step("Set endpoint IAM to {iam_endpoint}") - def set_iam_endpoint(self, iam_endpoint: str): - if self.iam_endpoint == iam_endpoint: - return - - self.iam_endpoint = iam_endpoint - - self.boto3_iam_client = self.session.client( - service_name="iam", - aws_access_key_id=self.access_key_id, - aws_secret_access_key=self.secret_access_key, - region_name=self.region, - endpoint_url=self.iam_endpoint, - verify=False, - ) - # since the STS does not have an endpoint, IAM is used - self.boto3_sts_client = self.session.client( - service_name="sts", - aws_access_key_id=self.access_key_id, - aws_secret_access_key=self.secret_access_key, - endpoint_url=iam_endpoint, - verify=False, - ) - - def _to_s3_param(self, param: str) -> str: - replacement_map = { 
- "Acl": "ACL", - "Cors": "CORS", - "_": "", - } - result = param.title() - for find, replace in replacement_map.items(): - result = result.replace(find, replace) - return result - - def _convert_to_s3_params(self, scope: dict, exclude: Optional[list[str]] = None) -> dict: - exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"] - return {self._to_s3_param(param): value for param, value in scope.items() if param not in exclude and value is not None} - - def _exec_request(self, method: Callable, params: Optional[dict] = None, **kwargs): - if not params: - params = {} - - try: - result = method(**params) - except ClientError as err: - log_command_execution(method.__name__, err.response, params, **kwargs) - raise - - log_command_execution(method.__name__, result, params, **kwargs) - return result - - # BUCKET METHODS # - @reporter.step("Create bucket S3") - def create_bucket( - self, - bucket: Optional[str] = None, - object_lock_enabled_for_bucket: Optional[bool] = None, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - grant_full_control: Optional[str] = None, - location_constraint: Optional[str] = None, - ) -> str: - if bucket is None: - bucket = string_utils.unique_name("bucket-") - - params = {"Bucket": bucket} - if object_lock_enabled_for_bucket is not None: - params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket}) - - if acl is not None: - params.update({"ACL": acl}) - elif grant_write or grant_read or grant_full_control: - if grant_write: - params.update({"GrantWrite": grant_write}) - elif grant_read: - params.update({"GrantRead": grant_read}) - elif grant_full_control: - params.update({"GrantFullControl": grant_full_control}) - - if location_constraint: - params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) - - self._exec_request(self.boto3_client.create_bucket, params, endpoint=self.s3gate_endpoint, profile=self.profile) - return bucket - - @reporter.step("List buckets S3") - def list_buckets(self) -> list[str]: - response = self._exec_request( - self.boto3_client.list_buckets, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return [bucket["Name"] for bucket in response["Buckets"]] - - @reporter.step("Delete bucket S3") - def delete_bucket(self, bucket: str) -> None: - self._exec_request( - self.boto3_client.delete_bucket, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Head bucket S3") - def head_bucket(self, bucket: str) -> None: - self._exec_request( - self.boto3_client.head_bucket, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put bucket versioning status") - def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: - params = {"Bucket": bucket, "VersioningConfiguration": {"Status": status.value}} - self._exec_request( - self.boto3_client.put_bucket_versioning, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Get bucket versioning status") - def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: - response = self._exec_request( - self.boto3_client.get_bucket_versioning, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("Status") - - @reporter.step("Put bucket tagging") - def put_bucket_tagging(self, bucket: str, tags: list) -> None: - tags = [{"Key": 
tag_key, "Value": tag_value} for tag_key, tag_value in tags] - tagging = {"TagSet": tags} - params = self._convert_to_s3_params(locals(), exclude=["tags"]) - self._exec_request( - self.boto3_client.put_bucket_tagging, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Get bucket tagging") - def get_bucket_tagging(self, bucket: str) -> list: - response = self._exec_request( - self.boto3_client.get_bucket_tagging, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("TagSet") - - @reporter.step("Get bucket acl") - def get_bucket_acl(self, bucket: str) -> dict: - return self._exec_request( - self.boto3_client.get_bucket_acl, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Delete bucket tagging") - def delete_bucket_tagging(self, bucket: str) -> None: - self._exec_request( - self.boto3_client.delete_bucket_tagging, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put bucket ACL") - def put_bucket_acl( - self, - bucket: str, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> None: - params = self._convert_to_s3_params(locals()) - self._exec_request( - self.boto3_client.put_bucket_acl, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put object lock configuration") - def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: - params = {"Bucket": bucket, "ObjectLockConfiguration": configuration} - return self._exec_request( - self.boto3_client.put_object_lock_configuration, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Get object lock configuration") - def get_object_lock_configuration(self, bucket: str) -> dict: - response = self._exec_request( - self.boto3_client.get_object_lock_configuration, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("ObjectLockConfiguration") - - @reporter.step("Get bucket policy") - def get_bucket_policy(self, bucket: str) -> str: - response = self._exec_request( - self.boto3_client.get_bucket_policy, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("Policy") - - @reporter.step("Delete bucket policy") - def delete_bucket_policy(self, bucket: str) -> str: - return self._exec_request( - self.boto3_client.delete_bucket_policy, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put bucket policy") - def put_bucket_policy(self, bucket: str, policy: dict) -> None: - params = {"Bucket": bucket, "Policy": json.dumps(policy)} - return self._exec_request( - self.boto3_client.put_bucket_policy, - params, - # Overriding option for AWS CLI - policy=policy, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Get bucket cors") - def get_bucket_cors(self, bucket: str) -> dict: - response = self._exec_request( - self.boto3_client.get_bucket_cors, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("CORSRules") - - @reporter.step("Get bucket location") - def get_bucket_location(self, bucket: str) -> str: - response = self._exec_request( - self.boto3_client.get_bucket_location, - params={"Bucket": bucket}, - 
endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("LocationConstraint") - - @reporter.step("Put bucket cors") - def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_client.put_bucket_cors, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Delete bucket cors") - def delete_bucket_cors(self, bucket: str) -> None: - self._exec_request( - self.boto3_client.delete_bucket_cors, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put bucket lifecycle configuration") - def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: - params = self._convert_to_s3_params(locals(), exclude=["dumped_configuration"]) - return self._exec_request( - self.boto3_client.put_bucket_lifecycle_configuration, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Get bucket lifecycle configuration") - def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: - response = self._exec_request( - self.boto3_client.get_bucket_lifecycle_configuration, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return {"Rules": response.get("Rules")} - - @reporter.step("Delete bucket lifecycle configuration") - def delete_bucket_lifecycle(self, bucket: str) -> dict: - return self._exec_request( - self.boto3_client.delete_bucket_lifecycle, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - # END OF BUCKET METHODS # - # OBJECT METHODS # - - @reporter.step("List objects S3 v2") - def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - response = self._exec_request( - self.boto3_client.list_objects_v2, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - obj_list = [obj["Key"] for obj in response.get("Contents", [])] - logger.info(f"Found s3 objects: {obj_list}") - return response if full_output else obj_list - - @reporter.step("List objects S3") - def list_objects( - self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None - ) -> Union[dict, list[str]]: - params = {"Bucket": bucket} - if page_size: - params["MaxKeys"] = page_size - if prefix: - params["Prefix"] = prefix - response = self._exec_request( - self.boto3_client.list_objects, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - obj_list = [obj["Key"] for obj in response.get("Contents", [])] - logger.info(f"Found s3 objects: {obj_list}") - return response if full_output else obj_list - - @reporter.step("List objects versions S3") - def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: - response = self._exec_request( - self.boto3_client.list_object_versions, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response if full_output else response.get("Versions", []) - - @reporter.step("List objects delete markers S3") - def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: - response = self._exec_request( - self.boto3_client.list_object_versions, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response if full_output else response.get("DeleteMarkers", []) 
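A usage sketch of the bucket and listing helpers above, assuming `s3` is an already-configured Boto3ClientWrapper instance (credentials and endpoint set as in its __init__); everything else uses only the methods shown in this class.

# Hypothetical wiring: `s3` is assumed to be a configured Boto3ClientWrapper instance.
bucket = s3.create_bucket(object_lock_enabled_for_bucket=True)  # generates a unique bucket name
s3.head_bucket(bucket)                                          # raises via ClientError if absent
print(s3.get_bucket_location(bucket))                           # LocationConstraint or None

keys = s3.list_objects_v2(bucket)                   # plain list of keys
full = s3.list_objects_v2(bucket, full_output=True) # raw ListObjectsV2 response

versions = s3.list_objects_versions(bucket)  # "Versions" entries only
markers = s3.list_delete_markers(bucket)     # "DeleteMarkers" entries only

s3.delete_bucket(bucket)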
- - @reporter.step("Put object S3") - def put_object( - self, - bucket: str, - filepath: str, - key: Optional[str] = None, - metadata: Optional[dict] = None, - tagging: Optional[str] = None, - acl: Optional[str] = None, - object_lock_mode: Optional[str] = None, - object_lock_retain_until_date: Optional[datetime] = None, - object_lock_legal_hold_status: Optional[str] = None, - grant_full_control: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> str: - if key is None: - key = os.path.basename(filepath) - - with open(filepath, "rb") as put_file: - body = put_file.read() - - params = self._convert_to_s3_params(locals(), exclude=["filepath", "put_file"]) - response = self._exec_request( - self.boto3_client.put_object, - params, - body=filepath, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("VersionId") - - @reporter.step("Head object S3") - def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_client.head_object, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Delete object S3") - def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_client.delete_object, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Delete objects S3") - def delete_objects(self, bucket: str, keys: list[str]) -> dict: - params = {"Bucket": bucket, "Delete": _make_objs_dict(keys)} - response = self._exec_request( - self.boto3_client.delete_objects, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - assert ( - "Errors" not in response - ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' - - return response - - @reporter.step("Delete object versions S3") - def delete_object_versions(self, bucket: str, object_versions: list) -> dict: - # Build deletion list in S3 format - delete_list = { - "Objects": [ - { - "Key": object_version["Key"], - "VersionId": object_version["VersionId"], - } - for object_version in object_versions - ] - } - params = {"Bucket": bucket, "Delete": delete_list} - return self._exec_request( - self.boto3_client.delete_objects, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Delete object versions S3 without delete markers") - def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: - # Delete objects without creating delete markers - for object_version in object_versions: - params = {"Bucket": bucket, "Key": object_version["Key"], "VersionId": object_version["VersionId"]} - self._exec_request( - self.boto3_client.delete_object, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put object ACL") - def put_object_acl( - self, - bucket: str, - key: str, - acl: Optional[str] = None, - grant_write: Optional[str] = None, - grant_read: Optional[str] = None, - ) -> list: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_client.put_object_acl, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("Grants") - - @reporter.step("Get object ACL") - def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] 
= None) -> list: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_client.get_object_acl, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("Grants") - - @reporter.step("Copy object S3") - def copy_object( - self, - source_bucket: str, - source_key: str, - bucket: Optional[str] = None, - key: Optional[str] = None, - acl: Optional[str] = None, - metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None, - metadata: Optional[dict] = None, - tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None, - tagging: Optional[str] = None, - ) -> str: - if bucket is None: - bucket = source_bucket - - if key is None: - key = string_utils.unique_name("copy-object-") - - copy_source = f"{source_bucket}/{source_key}" - params = self._convert_to_s3_params(locals(), exclude=["source_bucket", "source_key"]) - - self._exec_request( - self.boto3_client.copy_object, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return key - - @reporter.step("Get object S3") - def get_object( - self, - bucket: str, - key: str, - version_id: Optional[str] = None, - object_range: Optional[tuple[int, int]] = None, - full_output: bool = False, - ) -> dict | TestFile: - range_str = None - if object_range: - range_str = f"bytes={object_range[0]}-{object_range[1]}" - - params = locals() - params.update({"Range": f"bytes={object_range[0]}-{object_range[1]}"} if object_range else {}) - params = self._convert_to_s3_params(params, exclude=["object_range", "full_output", "range_str"]) - response = self._exec_request( - self.boto3_client.get_object, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - if full_output: - return response - - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-"))) - with open(test_file, "wb") as file: - chunk = response["Body"].read(1024) - while chunk: - file.write(chunk) - chunk = response["Body"].read(1024) - return test_file - - @reporter.step("Create multipart upload S3") - def create_multipart_upload(self, bucket: str, key: str) -> str: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_client.create_multipart_upload, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" - return response["UploadId"] - - @reporter.step("List multipart uploads S3") - def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: - response = self._exec_request( - self.boto3_client.list_multipart_uploads, - params={"Bucket": bucket}, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("Uploads") - - @reporter.step("Abort multipart upload S3") - def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: - params = self._convert_to_s3_params(locals()) - self._exec_request( - self.boto3_client.abort_multipart_upload, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Upload part S3") - def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: - with open(filepath, "rb") as put_file: - body = put_file.read() - - params = self._convert_to_s3_params(locals(), exclude=["put_file", "part_num", "filepath"]) - params["PartNumber"] = part_num - - response = self._exec_request( - self.boto3_client.upload_part, - params, - body=filepath, - endpoint=self.s3gate_endpoint, - 
profile=self.profile, - ) - assert response.get("ETag"), f"Expected ETag in response:\n{response}" - return response["ETag"] - - @reporter.step("Upload copy part S3") - def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: - params = self._convert_to_s3_params(locals(), exclude=["put_file", "part_num", "filepath"]) - params["PartNumber"] = part_num - response = self._exec_request( - self.boto3_client.upload_part_copy, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" - return response["CopyPartResult"]["ETag"] - - @reporter.step("List parts S3") - def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_client.list_parts, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - assert response.get("Parts"), f"Expected Parts in response:\n{response}" - return response["Parts"] - - @reporter.step("Complete multipart upload S3") - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: - parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] - params = self._convert_to_s3_params(locals(), exclude=["parts"]) - params["MultipartUpload"] = {"Parts": parts} - return self._exec_request( - self.boto3_client.complete_multipart_upload, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put object retention") - def put_object_retention( - self, - bucket: str, - key: str, - retention: dict, - version_id: Optional[str] = None, - bypass_governance_retention: Optional[bool] = None, - ) -> None: - params = self._convert_to_s3_params(locals()) - self._exec_request( - self.boto3_client.put_object_retention, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put object legal hold") - def put_object_legal_hold( - self, - bucket: str, - key: str, - legal_hold_status: Literal["ON", "OFF"], - version_id: Optional[str] = None, - ) -> None: - legal_hold = {"Status": legal_hold_status} - params = self._convert_to_s3_params(locals(), exclude=["legal_hold_status"]) - self._exec_request( - self.boto3_client.put_object_legal_hold, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Put object tagging") - def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: - tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - tagging = {"TagSet": tags} - params = self._convert_to_s3_params(locals(), exclude=["tags"]) - self._exec_request( - self.boto3_client.put_object_tagging, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - - @reporter.step("Get object tagging") - def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_client.get_object_tagging, - params, - endpoint=self.s3gate_endpoint, - profile=self.profile, - ) - return response.get("TagSet") - - @reporter.step("Delete object tagging") - def delete_object_tagging(self, bucket: str, key: str) -> None: - params = self._convert_to_s3_params(locals()) - self._exec_request( - self.boto3_client.delete_object_tagging, - params, - endpoint=self.s3gate_endpoint, - 
profile=self.profile, - ) - - @reporter.step("Get object attributes") - def get_object_attributes( - self, - bucket: str, - key: str, - attributes: list[str], - version_id: Optional[str] = None, - max_parts: Optional[int] = None, - part_number: Optional[int] = None, - full_output: bool = True, - ) -> dict: - logger.warning("Method get_object_attributes is not supported by boto3 client") - return {} - - @reporter.step("Sync directory S3") - def sync( - self, - bucket: str, - dir_path: str, - acl: Optional[str] = None, - metadata: Optional[dict] = None, - ) -> dict: - raise NotImplementedError("Sync is not supported for boto3 client") - - @reporter.step("CP directory S3") - def cp( - self, - bucket: str, - dir_path: str, - acl: Optional[str] = None, - metadata: Optional[dict] = None, - ) -> dict: - raise NotImplementedError("Cp is not supported for boto3 client") - - # END OBJECT METHODS # - - # IAM METHODS # - # Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.) - - @reporter.step("Adds the specified user to the specified group") - def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_iam_client.add_user_to_group, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Attaches the specified managed policy to the specified IAM group") - def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_iam_client.attach_group_policy, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - sleep(S3_SYNC_WAIT_TIME * 14) - return response - - @reporter.step("Attaches the specified managed policy to the specified user") - def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_iam_client.attach_user_policy, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - sleep(S3_SYNC_WAIT_TIME * 14) - return response - - @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") - def iam_create_access_key(self, user_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.create_access_key, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - access_key_id = response["AccessKey"].get("AccessKeyId") - secret_access_key = response["AccessKey"].get("SecretAccessKey") - assert access_key_id, f"Expected AccessKeyId in response:\n{response}" - assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" - - return access_key_id, secret_access_key - - @reporter.step("Creates a new group") - def iam_create_group(self, group_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.create_group, - params={"GroupName": group_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - assert response.get("Group"), f"Expected Group in response:\n{response}" - assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" - - return response - - @reporter.step("Creates a new managed policy for your AWS account") - def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: - params = self._convert_to_s3_params(locals()) - params["PolicyDocument"] = json.dumps(policy_document) - response = 
self._exec_request( - self.boto3_iam_client.create_policy, - params, - # Overriding option for AWS CLI - policy_document=policy_document, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - assert response.get("Policy"), f"Expected Policy in response:\n{response}" - assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" - - return response - - @reporter.step("Creates a new IAM user for your AWS account") - def iam_create_user(self, user_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.create_user, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - assert response.get("User"), f"Expected User in response:\n{response}" - assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" - - return response - - @reporter.step("Deletes the access key pair associated with the specified IAM user") - def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_iam_client.delete_access_key, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Deletes the specified IAM group") - def iam_delete_group(self, group_name: str) -> dict: - return self._exec_request( - self.boto3_iam_client.delete_group, - params={"GroupName": group_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") - def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_iam_client.delete_group_policy, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Deletes the specified managed policy") - def iam_delete_policy(self, policy_arn: str) -> dict: - return self._exec_request( - self.boto3_iam_client.delete_policy, - params={"PolicyArn": policy_arn}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Deletes the specified IAM user") - def iam_delete_user(self, user_name: str) -> dict: - return self._exec_request( - self.boto3_iam_client.delete_user, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") - def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_iam_client.delete_user_policy, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Removes the specified managed policy from the specified IAM group") - def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_iam_client.detach_group_policy, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - sleep(S3_SYNC_WAIT_TIME * 14) - return response - - @reporter.step("Removes the specified managed policy from the specified user") - def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_iam_client.detach_user_policy, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - 
sleep(S3_SYNC_WAIT_TIME * 14) - return response - - @reporter.step("Returns a list of IAM users that are in the specified IAM group") - def iam_get_group(self, group_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.get_group, - params={"GroupName": group_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}" - return response - - @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") - def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_iam_client.get_group_policy, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Retrieves information about the specified managed policy") - def iam_get_policy(self, policy_arn: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.get_policy, - params={"PolicyArn": policy_arn}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - assert response.get("Policy"), f"Expected Policy in response:\n{response}" - assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" - - return response - - @reporter.step("Retrieves information about the specified version of the specified managed policy") - def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_iam_client.get_policy_version, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}" - assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}" - - return response - - @reporter.step("Retrieves information about the specified IAM user") - def iam_get_user(self, user_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.get_user, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - assert response.get("User"), f"Expected User in response:\n{response}" - assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" - - return response - - @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") - def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals()) - response = self._exec_request( - self.boto3_iam_client.get_user_policy, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert response.get("UserName"), f"Expected UserName in response:\n{response}" - return response - - @reporter.step("Returns information about the access key IDs associated with the specified IAM user") - def iam_list_access_keys(self, user_name: str) -> dict: - return self._exec_request( - self.boto3_iam_client.list_access_keys, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Lists all managed policies that are attached to the specified IAM group") - def iam_list_attached_group_policies(self, group_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_attached_group_policies, - params={"GroupName": group_name}, - endpoint=self.iam_endpoint, - 
profile=self.profile, - ) - assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" - return response - - @reporter.step("Lists all managed policies that are attached to the specified IAM user") - def iam_list_attached_user_policies(self, user_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_attached_user_policies, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" - return response - - @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") - def iam_list_entities_for_policy(self, policy_arn: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_entities_for_policy, - params={"PolicyArn": policy_arn}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" - assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" - - return response - - @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") - def iam_list_group_policies(self, group_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_group_policies, - params={"GroupName": group_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" - return response - - @reporter.step("Lists the IAM groups") - def iam_list_groups(self) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_groups, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" - return response - - @reporter.step("Lists the IAM groups that the specified IAM user belongs to") - def iam_list_groups_for_user(self, user_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_groups_for_user, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" - return response - - @reporter.step("Lists all the managed policies that are available in your AWS account") - def iam_list_policies(self) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_policies, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert response.get("Policies"), f"Expected Policies in response:\n{response}" - return response - - @reporter.step("Lists information about the versions of the specified managed policy") - def iam_list_policy_versions(self, policy_arn: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_policy_versions, - params={"PolicyArn": policy_arn}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert response.get("Versions"), f"Expected Versions in response:\n{response}" - return response - - @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") - def iam_list_user_policies(self, user_name: str) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_user_policies, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" - return response - 
- @reporter.step("Lists the IAM users") - def iam_list_users(self) -> dict: - response = self._exec_request( - self.boto3_iam_client.list_users, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - assert response.get("Users"), f"Expected Users in response:\n{response}" - return response - - @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") - def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: - params = self._convert_to_s3_params(locals()) - params["PolicyDocument"] = json.dumps(policy_document) - response = self._exec_request( - self.boto3_iam_client.put_group_policy, - params, - # Overriding option for AWS CLI - policy_document=policy_document, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - sleep(S3_SYNC_WAIT_TIME * 14) - return response - - @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") - def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: - params = self._convert_to_s3_params(locals()) - params["PolicyDocument"] = json.dumps(policy_document) - response = self._exec_request( - self.boto3_iam_client.put_user_policy, - params, - # Overriding option for AWS CLI - policy_document=policy_document, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - sleep(S3_SYNC_WAIT_TIME * 14) - return response - - @reporter.step("Removes the specified user from the specified group") - def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_iam_client.remove_user_from_group, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Updates the name and/or the path of the specified IAM group") - def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict: - params = {"GroupName": group_name, "NewGroupName": new_name, "NewPath": "/"} - return self._exec_request( - self.boto3_iam_client.update_group, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Updates the name and/or the path of the specified IAM user") - def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict: - params = {"UserName": user_name, "NewUserName": new_name, "NewPath": "/"} - return self._exec_request( - self.boto3_iam_client.update_user, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Adds one or more tags to an IAM user") - def iam_tag_user(self, user_name: str, tags: list) -> dict: - params = self._convert_to_s3_params(locals()) - params["Tags"] = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - return self._exec_request( - self.boto3_iam_client.tag_user, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("List tags of IAM user") - def iam_list_user_tags(self, user_name: str) -> dict: - return self._exec_request( - self.boto3_iam_client.list_user_tags, - params={"UserName": user_name}, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - @reporter.step("Removes the specified tags from the user") - def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: - params = self._convert_to_s3_params(locals()) - return self._exec_request( - self.boto3_iam_client.untag_user, - params, - endpoint=self.iam_endpoint, - profile=self.profile, - ) - - # MFA 
methods - @reporter.step("Creates a new virtual MFA device") - def iam_create_virtual_mfa_device( - self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None - ) -> tuple: - response = self.boto3_iam_client.create_virtual_mfa_device(VirtualMFADeviceName=virtual_mfa_device_name) - - serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber") - base32StringSeed = response.get("VirtualMFADevice", {}).get("Base32StringSeed") - assert serial_number, f"Expected SerialNumber in response:\n{response}" - assert base32StringSeed, f"Expected Base32StringSeed in response:\n{response}" - - return serial_number, base32StringSeed - - @reporter.step("Deactivates the specified MFA device and removes it from association with the user name") - def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: - response = self.boto3_iam_client.deactivate_mfa_device(UserName=user_name, SerialNumber=serial_number) - - return response - - @reporter.step("Deletes a virtual MFA device") - def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: - response = self.boto3_iam_client.delete_virtual_mfa_device(SerialNumber=serial_number) - - return response - - @reporter.step("Enables the specified MFA device and associates it with the specified IAM user") - def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: - response = self.boto3_iam_client.enable_mfa_device( - UserName=user_name, - SerialNumber=serial_number, - AuthenticationCode1=authentication_code1, - AuthenticationCode2=authentication_code2, - ) - - return response - - @reporter.step("Lists the MFA devices for an IAM user") - def iam_list_virtual_mfa_devices(self) -> dict: - response = self.boto3_iam_client.list_virtual_mfa_devices() - assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}" - - return response - - @reporter.step("Get session token for user") - def sts_get_session_token( - self, duration_seconds: Optional[str] = "", serial_number: Optional[str] = "", token_code: Optional[str] = "" - ) -> tuple: - response = self.boto3_sts_client.get_session_token( - DurationSeconds=duration_seconds, - SerialNumber=serial_number, - TokenCode=token_code, - ) - - access_key = response.get("Credentials", {}).get("AccessKeyId") - secret_access_key = response.get("Credentials", {}).get("SecretAccessKey") - session_token = response.get("Credentials", {}).get("SessionToken") - assert access_key, f"Expected AccessKeyId in response:\n{response}" - assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" - assert session_token, f"Expected SessionToken in response:\n{response}" - - return access_key, secret_access_key, session_token diff --git a/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py b/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py deleted file mode 100644 index 4d845cf..0000000 --- a/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py +++ /dev/null @@ -1,16 +0,0 @@ -import re - -from frostfs_testlib.cli.generic_cli import GenericCli -from frostfs_testlib.clients.s3 import BucketContainerResolver -from frostfs_testlib.storage.cluster import ClusterNode - - -class CurlBucketContainerResolver(BucketContainerResolver): - def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str: - curl = GenericCli("curl", node.host) - output = curl(f"-I http://127.0.0.1:8084/{bucket_name}") - pattern = r"X-Container-Id: 
(\S+)" - cid = re.findall(pattern, output.stdout) - if cid: - return cid[0] - return None diff --git a/src/frostfs_testlib/clients/s3/s3_http_client.py b/src/frostfs_testlib/clients/s3/s3_http_client.py deleted file mode 100644 index f6f423d..0000000 --- a/src/frostfs_testlib/clients/s3/s3_http_client.py +++ /dev/null @@ -1,149 +0,0 @@ -import hashlib -import logging -import xml.etree.ElementTree as ET - -import httpx -from botocore.auth import SigV4Auth -from botocore.awsrequest import AWSRequest -from botocore.credentials import Credentials - -from frostfs_testlib import reporter -from frostfs_testlib.clients import HttpClient -from frostfs_testlib.utils.file_utils import TestFile - -logger = logging.getLogger("NeoLogger") - -DEFAULT_TIMEOUT = 60.0 - - -class S3HttpClient: - def __init__( - self, s3gate_endpoint: str, access_key_id: str, secret_access_key: str, profile: str = "default", region: str = "us-east-1" - ) -> None: - self.http_client = HttpClient() - self.credentials = Credentials(access_key_id, secret_access_key) - self.profile = profile - self.region = region - - self.iam_endpoint: str = None - self.s3gate_endpoint: str = None - self.service: str = None - self.signature: SigV4Auth = None - - self.set_endpoint(s3gate_endpoint) - - def _to_s3_header(self, header: str) -> dict: - replacement_map = { - "Acl": "ACL", - "_": "-", - } - - result = header - if not header.startswith("x_amz"): - result = header.title() - - for find, replace in replacement_map.items(): - result = result.replace(find, replace) - - return result - - def _convert_to_s3_headers(self, scope: dict, exclude: list[str] = None): - exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"] - return {self._to_s3_header(header): value for header, value in scope.items() if header not in exclude and value is not None} - - def _create_aws_request( - self, method: str, url: str, headers: dict, content: str | bytes | TestFile = None, params: dict = None - ) -> AWSRequest: - data = b"" - - if content is not None: - if isinstance(content, TestFile): - with open(content, "rb") as io_content: - data = io_content.read() - elif isinstance(content, str): - data = bytes(content, encoding="utf-8") - elif isinstance(content, bytes): - data = content - else: - raise TypeError(f"Content expected as a string, bytes or TestFile object, got: {content}") - - headers["X-Amz-Content-SHA256"] = hashlib.sha256(data).hexdigest() - aws_request = AWSRequest(method, url, headers, data, params) - self.signature.add_auth(aws_request) - - return aws_request - - def _exec_request( - self, - method: str, - url: str, - headers: dict, - content: str | bytes | TestFile = None, - params: dict = None, - timeout: float = DEFAULT_TIMEOUT, - ) -> dict: - aws_request = self._create_aws_request(method, url, headers, content, params) - response = self.http_client.send( - aws_request.method, - aws_request.url, - headers=dict(aws_request.headers), - data=aws_request.data, - params=aws_request.params, - timeout=timeout, - ) - - try: - response.raise_for_status() - except httpx.HTTPStatusError: - raise httpx.HTTPStatusError(response.text, request=response.request, response=response) - - root = ET.fromstring(response.read()) - data = { - "LastModified": root.find(".//LastModified").text, - "ETag": root.find(".//ETag").text, - } - - if response.headers.get("x-amz-version-id"): - data["VersionId"] = response.headers.get("x-amz-version-id") - - return data - - @reporter.step("Set endpoint S3 to {s3gate_endpoint}") - def set_endpoint(self, 
s3gate_endpoint: str): - if self.s3gate_endpoint == s3gate_endpoint: - return - - self.s3gate_endpoint = s3gate_endpoint - self.service = "s3" - self.signature = SigV4Auth(self.credentials, self.service, self.region) - - @reporter.step("Set endpoint IAM to {iam_endpoint}") - def set_iam_endpoint(self, iam_endpoint: str): - if self.iam_endpoint == iam_endpoint: - return - - self.iam_endpoint = iam_endpoint - self.service = "iam" - self.signature = SigV4Auth(self.credentials, self.service, self.region) - - @reporter.step("Patch object S3") - def patch_object( - self, - bucket: str, - key: str, - content: str | bytes | TestFile, - content_range: str, - version_id: str = None, - if_match: str = None, - if_unmodified_since: str = None, - x_amz_expected_bucket_owner: str = None, - timeout: float = DEFAULT_TIMEOUT, - ) -> dict: - if content_range and not content_range.startswith("bytes"): - content_range = f"bytes {content_range}/*" - - url = f"{self.s3gate_endpoint}/{bucket}/{key}" - headers = self._convert_to_s3_headers(locals(), exclude=["bucket", "key", "content", "version_id", "timeout"]) - params = {"VersionId": version_id} if version_id is not None else None - - return self._exec_request("PATCH", url, headers, content, params, timeout=timeout) diff --git a/src/frostfs_testlib/credentials/authmate_s3_provider.py b/src/frostfs_testlib/credentials/authmate_s3_provider.py deleted file mode 100644 index ed6454b..0000000 --- a/src/frostfs_testlib/credentials/authmate_s3_provider.py +++ /dev/null @@ -1,47 +0,0 @@ -import re -from typing import Optional - -from frostfs_testlib import reporter -from frostfs_testlib.cli import FrostfsAuthmate -from frostfs_testlib.credentials.interfaces import S3Credentials, S3CredentialsProvider, User -from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC -from frostfs_testlib.shell import LocalShell -from frostfs_testlib.steps.cli.container import list_containers -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate -from frostfs_testlib.utils import string_utils - - -class AuthmateS3CredentialsProvider(S3CredentialsProvider): - @reporter.step("Init S3 Credentials using Authmate CLI") - def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials: - cluster_nodes: list[ClusterNode] = self.cluster.cluster_nodes - shell = LocalShell() - wallet = user.wallet - endpoint = cluster_node.storage_node.get_rpc_endpoint() - - gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes] - # unique short bucket name - bucket = string_utils.unique_name("bucket-") - - frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC) - issue_secret_output = frostfs_authmate.secret.issue( - wallet=wallet.path, - peer=endpoint, - gate_public_key=gate_public_keys, - wallet_password=wallet.password, - container_policy=location_constraints, - container_friendly_name=bucket, - ).stdout - - aws_access_key_id = str(re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id")) - aws_secret_access_key = str( - re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group("aws_secret_access_key") - ) - cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group("container_id")) - - containers_list = list_containers(wallet, shell, endpoint) - assert cid in containers_list, f"Expected cid {cid} in {containers_list}" - - user.s3_credentials =
S3Credentials(aws_access_key_id, aws_secret_access_key) - return user.s3_credentials diff --git a/src/frostfs_testlib/credentials/interfaces.py b/src/frostfs_testlib/credentials/interfaces.py deleted file mode 100644 index b2ae6f1..0000000 --- a/src/frostfs_testlib/credentials/interfaces.py +++ /dev/null @@ -1,51 +0,0 @@ -from abc import ABC, abstractmethod -from dataclasses import dataclass, field -from typing import Any, Optional - -from frostfs_testlib.plugins import load_plugin -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo - - -@dataclass -class S3Credentials: - access_key: str - secret_key: str - - -@dataclass -class User: - name: str - attributes: dict[str, Any] = field(default_factory=dict) - wallet: WalletInfo | None = None - s3_credentials: S3Credentials | None = None - - -class S3CredentialsProvider(ABC): - def __init__(self, cluster: Cluster) -> None: - self.cluster = cluster - - @abstractmethod - def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None, **kwargs) -> S3Credentials: - raise NotImplementedError("Directly called abstract class?") - - -class GrpcCredentialsProvider(ABC): - def __init__(self, cluster: Cluster) -> None: - self.cluster = cluster - - @abstractmethod - def provide(self, user: User, cluster_node: ClusterNode, **kwargs) -> WalletInfo: - raise NotImplementedError("Directly called abstract class?") - - -class CredentialsProvider(object): - S3: S3CredentialsProvider - GRPC: GrpcCredentialsProvider - - def __init__(self, cluster: Cluster) -> None: - config = cluster.cluster_nodes[0].host.config - s3_cls = load_plugin("frostfs.testlib.credentials_providers", config.s3_creds_plugin_name) - self.S3 = s3_cls(cluster) - grpc_cls = load_plugin("frostfs.testlib.credentials_providers", config.grpc_creds_plugin_name) - self.GRPC = grpc_cls(cluster) diff --git a/src/frostfs_testlib/credentials/wallet_factory_provider.py b/src/frostfs_testlib/credentials/wallet_factory_provider.py deleted file mode 100644 index d00020f..0000000 --- a/src/frostfs_testlib/credentials/wallet_factory_provider.py +++ /dev/null @@ -1,14 +0,0 @@ -from frostfs_testlib import reporter -from frostfs_testlib.credentials.interfaces import GrpcCredentialsProvider, User -from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS -from frostfs_testlib.shell.local_shell import LocalShell -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo - - -class WalletFactoryProvider(GrpcCredentialsProvider): - @reporter.step("Init gRPC Credentials using wallet generation") - def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo: - wallet_factory = WalletFactory(ASSETS_DIR, LocalShell()) - user.wallet = wallet_factory.create_wallet(file_name=user.name, password=DEFAULT_WALLET_PASS) - return user.wallet diff --git a/src/frostfs_testlib/defaults.py b/src/frostfs_testlib/defaults.py index 22097be..687fbd6 100644 --- a/src/frostfs_testlib/defaults.py +++ b/src/frostfs_testlib/defaults.py @@ -1,5 +1,5 @@ class Options: - DEFAULT_SHELL_TIMEOUT = 120 + DEFAULT_SHELL_TIMEOUT = 90 @staticmethod def get_default_shell_timeout(): diff --git a/src/frostfs_testlib/fixtures.py b/src/frostfs_testlib/fixtures.py deleted file mode 100644 index 7d767d2..0000000 --- a/src/frostfs_testlib/fixtures.py +++ /dev/null @@ -1,52 +0,0 @@ -import logging -import os -from datetime import datetime 
-from importlib.metadata import entry_points - -import pytest -import yaml - -from frostfs_testlib import reporter -from frostfs_testlib.hosting.hosting import Hosting -from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE -from frostfs_testlib.storage import get_service_registry - - -@pytest.fixture(scope="session", autouse=True) -def session_start_time(): - start_time = datetime.utcnow() - return start_time - - -@pytest.fixture(scope="session") -def configure_testlib(): - reporter.get_reporter().register_handler(reporter.AllureHandler()) - reporter.get_reporter().register_handler(reporter.StepsLogger()) - logging.getLogger("paramiko").setLevel(logging.INFO) - - # Register Services for cluster - registry = get_service_registry() - services = entry_points(group="frostfs.testlib.services") - for svc in services: - registry.register_service(svc.name, svc.load()) - - -@pytest.fixture(scope="session") -def temp_directory(configure_testlib): - with reporter.step("Prepare tmp directory"): - full_path = ASSETS_DIR - if not os.path.exists(full_path): - os.mkdir(full_path) - - return full_path - - -@pytest.fixture(scope="session") -def hosting(configure_testlib) -> Hosting: - with open(HOSTING_CONFIG_FILE, "r") as file: - hosting_config = yaml.full_load(file) - - hosting_instance = Hosting() - hosting_instance.configure(hosting_config) - - return hosting_instance diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py deleted file mode 100644 index fc7ba59..0000000 --- a/src/frostfs_testlib/healthcheck/basic_healthcheck.py +++ /dev/null @@ -1,109 +0,0 @@ -from typing import Callable - -from frostfs_testlib import reporter -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.healthcheck.interfaces import Healthcheck -from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC -from frostfs_testlib.shell import CommandOptions -from frostfs_testlib.steps.node_management import storage_node_healthcheck -from frostfs_testlib.storage.cluster import ClusterNode, ServiceClass -from frostfs_testlib.testing.test_control import wait_for_success -from frostfs_testlib.utils.failover_utils import check_services_status - - -class BasicHealthcheck(Healthcheck): - def _perform(self, cluster_node: ClusterNode, checks: dict[Callable, dict]): - issues: list[str] = [] - for check, kwargs in checks.items(): - issue = check(cluster_node, **kwargs) - if issue: - issues.append(issue) - - assert not issues, "Issues found:\n" + "\n".join(issues) - - @wait_for_success(900, 30, title="Wait for full healthcheck for {cluster_node}") - def full_healthcheck(self, cluster_node: ClusterNode): - checks = { - self.storage_healthcheck: {}, - self._tree_healthcheck: {}, - } - - self._perform(cluster_node, checks) - - @wait_for_success(900, 30, title="Wait for startup healthcheck on {cluster_node}") - def startup_healthcheck(self, cluster_node: ClusterNode): - checks = { - self.storage_healthcheck: {}, - self._tree_healthcheck: {}, - } - - self._perform(cluster_node, checks) - - @wait_for_success(900, 30, title="Wait for storage healthcheck on {cluster_node}") - def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: - checks = { - self._storage_healthcheck: {}, - } - - self._perform(cluster_node, checks) - - @wait_for_success(900, 30, title="Wait for tree healthcheck on {cluster_node}") - def tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: - checks = { - self._tree_healthcheck: {}, - } - - 
self._perform(cluster_node, checks) - - @wait_for_success(120, 5, title="Wait for service healthcheck on {cluster_node}") - def services_healthcheck(self, cluster_node: ClusterNode): - svcs_to_check = cluster_node.services - checks = { - check_services_status: { - "service_list": svcs_to_check, - "expected_status": "active", - }, - self._check_services: {"services": svcs_to_check}, - } - - self._perform(cluster_node, checks) - - def _check_services(self, cluster_node: ClusterNode, services: list[ServiceClass]): - for svc in services: - result = svc.service_healthcheck() - if result == False: - return f"Service {svc.get_service_systemctl_name()} healthcheck failed on node {cluster_node}." - - @reporter.step("Storage healthcheck on {cluster_node}") - def _storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: - result = storage_node_healthcheck(cluster_node.storage_node) - self._gather_socket_info(cluster_node) - if result.health_status != "READY" or result.network_status != "ONLINE": - return f"Node {cluster_node} is not healthy. Health={result.health_status}. Network={result.network_status}" - - @reporter.step("Tree healthcheck on {cluster_node}") - def _tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: - host = cluster_node.host - service_config = host.get_service_config(cluster_node.storage_node.name) - wallet_path = service_config.attributes["wallet_path"] - wallet_password = service_config.attributes["wallet_password"] - - shell = host.get_shell() - wallet_config_path = f"/tmp/{cluster_node.storage_node.name}-config.yaml" - wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' - shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") - - remote_cli = FrostfsCli( - shell, - host.get_cli_config(FROSTFS_CLI_EXEC).exec_path, - config_file=wallet_config_path, - ) - result = remote_cli.tree.healthcheck(rpc_endpoint="127.0.0.1:8080") - if result.return_code != 0: - return ( - f"Error during tree healthcheck (rc={result.return_code}): {result.stdout}. 
\n Stderr: {result.stderr}" - ) - - @reporter.step("Gather socket info for {cluster_node}") - def _gather_socket_info(self, cluster_node: ClusterNode): - cluster_node.host.get_shell().exec("ss -tuln | grep 8080", CommandOptions(check=False)) diff --git a/src/frostfs_testlib/healthcheck/interfaces.py b/src/frostfs_testlib/healthcheck/interfaces.py deleted file mode 100644 index cf17852..0000000 --- a/src/frostfs_testlib/healthcheck/interfaces.py +++ /dev/null @@ -1,25 +0,0 @@ -from abc import ABC, abstractmethod - -from frostfs_testlib.storage.cluster import ClusterNode - - -class Healthcheck(ABC): - @abstractmethod - def full_healthcheck(self, cluster_node: ClusterNode): - """Perform full healthcheck on the target cluster node""" - - @abstractmethod - def startup_healthcheck(self, cluster_node: ClusterNode): - """Perform healthcheck required on startup of target cluster node""" - - @abstractmethod - def storage_healthcheck(self, cluster_node: ClusterNode): - """Perform storage service healthcheck on target cluster node""" - - @abstractmethod - def services_healthcheck(self, cluster_node: ClusterNode): - """Perform service status check on target cluster node""" - - @abstractmethod - def tree_healthcheck(self, cluster_node: ClusterNode): - """Perform tree healthcheck on target cluster node""" diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py deleted file mode 100644 index c56c75a..0000000 --- a/src/frostfs_testlib/hooks.py +++ /dev/null @@ -1,31 +0,0 @@ -import pytest - - -@pytest.hookimpl(specname="pytest_collection_modifyitems") -def pytest_add_frostfs_marker(items: list[pytest.Item]): - # All tests which reside in frostfs nodeid are granted with frostfs marker, excluding - # nodeid = full path of the test - # 1. plugins - # 2. testlib itself - for item in items: - location = item.location[0] - if "frostfs" in location and "plugin" not in location and "testlib" not in location: - item.add_marker("frostfs") - - -# pytest hook. 
Do not rename -@pytest.hookimpl(trylast=True) -def pytest_collection_modifyitems(items: list[pytest.Item]): - # The order of running tests corresponded to the suites - items.sort(key=lambda item: item.nodeid) - - # Change order of tests based on @pytest.mark.order() marker - def order(item: pytest.Item) -> int: - order_marker = item.get_closest_marker("order") - if order_marker and (len(order_marker.args) != 1 or not isinstance(order_marker.args[0], int)): - raise RuntimeError("Incorrect usage of pytest.mark.order") - - order_value = order_marker.args[0] if order_marker else 0 - return order_value - - items.sort(key=lambda item: order(item)) diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py index 6cdee39..dd8b4b9 100644 --- a/src/frostfs_testlib/hosting/config.py +++ b/src/frostfs_testlib/hosting/config.py @@ -10,7 +10,9 @@ class ParsedAttributes: def parse(cls, attributes: dict[str, Any]): # Pick attributes supported by the class field_names = set(field.name for field in fields(cls)) - supported_attributes = {key: value for key, value in attributes.items() if key in field_names} + supported_attributes = { + key: value for key, value in attributes.items() if key in field_names + } return cls(**supported_attributes) @@ -27,7 +29,6 @@ class CLIConfig: name: str exec_path: str attributes: dict[str, str] = field(default_factory=dict) - extra_args: list[str] = field(default_factory=list) @dataclass @@ -51,7 +52,6 @@ class HostConfig: Attributes: plugin_name: Name of plugin that should be used to manage the host. - healthcheck_plugin_name: Name of the plugin for healthcheck operations. address: Address of the machine (IP or DNS name). services: List of services hosted on the machine. clis: List of CLI tools available on the machine. 
@@ -60,17 +60,10 @@ class HostConfig: """ plugin_name: str - hostname: str - healthcheck_plugin_name: str address: str - s3_creds_plugin_name: str = field(default="authmate") - grpc_creds_plugin_name: str = field(default="wallet_factory") - product: str = field(default="frostfs") services: list[ServiceConfig] = field(default_factory=list) clis: list[CLIConfig] = field(default_factory=list) attributes: dict[str, str] = field(default_factory=dict) - interfaces: dict[str, str] = field(default_factory=dict) - environment: dict[str, str] = field(default_factory=dict) def __post_init__(self) -> None: self.services = [ServiceConfig(**service) for service in self.services or []] diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index d458b0a..b7f4852 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -11,7 +11,7 @@ import docker from requests import HTTPError from frostfs_testlib.hosting.config import ParsedAttributes -from frostfs_testlib.hosting.interfaces import DiskInfo, Host, HostStatus +from frostfs_testlib.hosting.interfaces import DiskInfo, Host from frostfs_testlib.shell import LocalShell, Shell, SSHShell from frostfs_testlib.shell.command_inspectors import SudoInspector @@ -61,10 +61,10 @@ class ServiceAttributes(ParsedAttributes): class DockerHost(Host): """Manages services hosted in Docker containers running on a local or remote machine.""" - def get_shell(self, sudo: bool = False) -> Shell: + def get_shell(self) -> Shell: host_attributes = HostAttributes.parse(self._config.attributes) command_inspectors = [] - if sudo: + if host_attributes.sudo_shell: command_inspectors.append(SudoInspector()) if not host_attributes.ssh_login: @@ -87,15 +87,6 @@ class DockerHost(Host): for service_config in self._config.services: self.start_service(service_config.name) - def get_host_status(self) -> HostStatus: - # We emulate host status by checking all services. - for service_config in self._config.services: - state = self._get_container_state(service_config.name) - if state != "running": - return HostStatus.OFFLINE - - return HostStatus.ONLINE - def stop_host(self) -> None: # We emulate stopping machine by stopping all services # As an alternative we can probably try to stop docker service... 
@@ -126,20 +117,6 @@ class DockerHost(Host): timeout=service_attributes.stop_timeout, ) - def mask_service(self, service_name: str) -> None: - # Not required for Docker - return - - def unmask_service(self, service_name: str) -> None: - # Not required for Docker - return - - def wait_success_suspend_process(self, service_name: str): - raise NotImplementedError("Not supported for docker") - - def wait_success_resume_process(self, service_name: str): - raise NotImplementedError("Not supported for docker") - def restart_service(self, service_name: str) -> None: service_attributes = self._get_service_attributes(service_name) @@ -152,21 +129,6 @@ class DockerHost(Host): timeout=service_attributes.start_timeout, ) - def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None: - raise NotImplementedError("Not implemented for docker") - - def get_data_directory(self, service_name: str) -> str: - service_attributes = self._get_service_attributes(service_name) - - client = self._get_docker_client() - volume_info = client.inspect_volume(service_attributes.volume_name) - volume_path = volume_info["Mountpoint"] - - return volume_path - - def send_signal_to_service(self, service_name: str, signal: str) -> None: - raise NotImplementedError("Not implemented for docker") - def delete_metabase(self, service_name: str) -> None: raise NotImplementedError("Not implemented for docker") @@ -182,20 +144,12 @@ class DockerHost(Host): def delete_pilorama(self, service_name: str) -> None: raise NotImplementedError("Not implemented for docker") - def delete_file(self, file_path: str) -> None: - raise NotImplementedError("Not implemented for docker") - - def is_file_exist(self, file_path: str) -> None: - raise NotImplementedError("Not implemented for docker") - - def wipefs_storage_node_data(self, service_name: str) -> None: - raise NotImplementedError("Not implemented for docker") - - def finish_wipefs(self, service_name: str) -> None: - raise NotImplementedError("Not implemented for docker") - def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: - volume_path = self.get_data_directory(service_name) + service_attributes = self._get_service_attributes(service_name) + + client = self._get_docker_client() + volume_info = client.inspect_volume(service_attributes.volume_name) + volume_path = volume_info["Mountpoint"] shell = self.get_shell() meta_clean_cmd = f"rm -rf {volume_path}/meta*/*" @@ -242,42 +196,11 @@ class DockerHost(Host): with open(file_path, "wb") as file: file.write(logs) - def get_filtered_logs( - self, - filter_regex: str, - since: Optional[datetime] = None, - until: Optional[datetime] = None, - unit: Optional[str] = None, - exclude_filter: Optional[str] = None, - priority: Optional[str] = None, - word_count: bool = None, - ) -> str: - client = self._get_docker_client() - filtered_logs = "" - for service_config in self._config.services: - container_name = self._get_service_attributes(service_config.name).container_name - try: - filtered_logs = client.logs(container_name, since=since, until=until) - except HTTPError as exc: - logger.info(f"Got exception while dumping logs of '{container_name}': {exc}") - continue - - if exclude_filter: - filtered_logs = filtered_logs.replace(exclude_filter, "") - matches = re.findall(filter_regex, filtered_logs, re.IGNORECASE + re.MULTILINE) - found = list(matches) - - if found: - filtered_logs += f"{container_name}:\n{os.linesep.join(found)}" - - return filtered_logs - def 
is_message_in_logs( self, message_regex: str, since: Optional[datetime] = None, until: Optional[datetime] = None, - unit: Optional[str] = None, ) -> bool: client = self._get_docker_client() for service_config in self._config.services: @@ -320,23 +243,20 @@ class DockerHost(Host): return container return None - def _wait_for_container_to_be_in_state(self, container_name: str, expected_state: str, timeout: int) -> None: + def _wait_for_container_to_be_in_state( + self, container_name: str, expected_state: str, timeout: int + ) -> None: iterations = 10 iteration_wait_time = timeout / iterations # To speed things up, we break timeout in smaller iterations and check container state # several times. This way waiting stops as soon as container reaches the expected state for _ in range(iterations): - state = self._get_container_state(container_name) + container = self._get_container_by_name(container_name) + logger.debug(f"Current container state\n:{json.dumps(container, indent=2)}") - if state == expected_state: + if container and container["State"] == expected_state: return time.sleep(iteration_wait_time) raise RuntimeError(f"Container {container_name} is not in {expected_state} state.") - - def _get_container_state(self, container_name: str) -> str: - container = self._get_container_by_name(container_name) - logger.debug(f"Current container state\n:{json.dumps(container, indent=2)}") - - return container.get("State", None) diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index a41161c..9178523 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -4,14 +4,6 @@ from typing import Optional from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig from frostfs_testlib.shell.interfaces import Shell -from frostfs_testlib.testing.readable import HumanReadableEnum -from frostfs_testlib.testing.test_control import retry - - -class HostStatus(HumanReadableEnum): - ONLINE = "Online" - OFFLINE = "Offline" - UNKNOWN = "Unknown" class DiskInfo(dict): @@ -26,12 +18,11 @@ class Host(ABC): def __init__(self, config: HostConfig) -> None: self._config = config - self._service_config_by_name = {service_config.name: service_config for service_config in config.services} + self._service_config_by_name = { + service_config.name: service_config for service_config in config.services + } self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis} - def __repr__(self) -> str: - return self.config.address - @property def config(self) -> HostConfig: """Returns config of the host. @@ -57,7 +48,7 @@ class Host(ABC): raise ValueError(f"Unknown service name: '{service_name}'") return service_config - def get_cli_config(self, cli_name: str, allow_empty: bool = False) -> CLIConfig: + def get_cli_config(self, cli_name: str) -> CLIConfig: """Returns config of CLI tool with specified name. The CLI must be located on this host. @@ -69,17 +60,14 @@ class Host(ABC): Config of the CLI tool. """ cli_config = self._cli_config_by_name.get(cli_name) - if cli_config is None and not allow_empty: + if cli_config is None: raise ValueError(f"Unknown CLI name: '{cli_name}'") return cli_config @abstractmethod - def get_shell(self, sudo: bool = True) -> Shell: + def get_shell(self) -> Shell: """Returns shell to this host. - Args: - sudo: if True, run all commands in shell with elevated rights - Returns: Shell that executes commands on this host. 
""" @@ -88,10 +76,6 @@ class Host(ABC): def start_host(self) -> None: """Starts the host machine.""" - @abstractmethod - def get_host_status(self) -> HostStatus: - """Check host status.""" - @abstractmethod def stop_host(self, mode: str) -> None: """Stops the host machine. @@ -120,37 +104,6 @@ class Host(ABC): service_name: Name of the service to stop. """ - @abstractmethod - def send_signal_to_service(self, service_name: str, signal: str) -> None: - """Send signal to service with specified name using kill - - - The service must be hosted on this host. - - Args: - service_name: Name of the service to stop. - signal: signal name. See kill -l to all names - """ - - @abstractmethod - def mask_service(self, service_name: str) -> None: - """Prevent the service from start by any activity by masking it. - - The service must be hosted on this host. - - Args: - service_name: Name of the service to mask. - """ - - @abstractmethod - def unmask_service(self, service_name: str) -> None: - """Allow the service to start by any activity by unmasking it. - - The service must be hosted on this host. - - Args: - service_name: Name of the service to unmask. - """ - @abstractmethod def restart_service(self, service_name: str) -> None: """Restarts the service with specified name and waits until it starts. @@ -159,30 +112,6 @@ class Host(ABC): service_name: Name of the service to restart. """ - @abstractmethod - def get_data_directory(self, service_name: str) -> str: - """ - Getting path to data directory on node for further usage - (example: list databases pilorama.db) - - Args: - service_name: Name of storage node service. - """ - - @abstractmethod - def wait_success_suspend_process(self, process_name: str) -> None: - """Search for a service ID by its name and stop the process - Args: - process_name: Name - """ - - @abstractmethod - def wait_success_resume_process(self, process_name: str) -> None: - """Search for a service by its ID and start the process - Args: - process_name: Name - """ - @abstractmethod def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: """Erases all data of the storage node with specified name. @@ -192,21 +121,6 @@ class Host(ABC): cache_only: To delete cache only. """ - @abstractmethod - def wipefs_storage_node_data(self, service_name: str) -> None: - """Erases all data of the storage node with specified name. - - Args: - service_name: Name of storage node service. - """ - - def finish_wipefs(self, service_name: str) -> None: - """Erases all data of the storage node with specified name. - - Args: - service_name: Name of storage node service. - """ - @abstractmethod def delete_fstree(self, service_name: str) -> None: """ @@ -248,22 +162,12 @@ class Host(ABC): """ @abstractmethod - def delete_file(self, file_path: str) -> None: + def delete_pilorama(self, service_name: str) -> None: """ - Deletes file with provided file path + Deletes all pilorama.db files in the node. Args: - file_path: full path to the file to delete - - """ - - @abstractmethod - def is_file_exist(self, file_path: str) -> bool: - """ - Checks if file exist - - Args: - file_path: full path to the file to check + service_name: Name of storage node service. 
""" @@ -318,40 +222,12 @@ class Host(ABC): filter_regex: regex to filter output """ - @abstractmethod - def get_filtered_logs( - self, - filter_regex: str, - since: Optional[datetime] = None, - until: Optional[datetime] = None, - unit: Optional[str] = None, - exclude_filter: Optional[str] = None, - priority: Optional[str] = None, - word_count: bool = None, - ) -> str: - """Get logs from host filtered by regex. - - Args: - filter_regex: regex filter for logs. - since: If set, limits the time from which logs should be collected. Must be in UTC. - until: If set, limits the time until which logs should be collected. Must be in UTC. - unit: required unit. - priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher. - For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0. - word_count: output type, expected values: lines, bytes, json - - Returns: - Found entries as str if any found. - Empty string otherwise. - """ - @abstractmethod def is_message_in_logs( self, message_regex: str, since: Optional[datetime] = None, until: Optional[datetime] = None, - unit: Optional[str] = None, ) -> bool: """Checks logs on host for specified message regex. @@ -364,35 +240,3 @@ class Host(ABC): True if message found in logs in the given time frame. False otherwise. """ - - @abstractmethod - def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None: - """ - Waites for service to be in specified state. - - Args: - systemd_service_name: Service to wait state of. - expected_state: State to wait for - timeout: Seconds to wait - - """ - - def down_interface(self, interface: str) -> None: - shell = self.get_shell() - shell.exec(f"ip link set {interface} down") - - def up_interface(self, interface: str) -> None: - shell = self.get_shell() - shell.exec(f"ip link set {interface} up") - - def check_state(self, interface: str) -> str: - shell = self.get_shell() - return shell.exec(f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'").stdout.strip() - - @retry(max_attempts=5, sleep_interval=5, expected_result="UP") - def check_state_up(self, interface: str) -> str: - return self.check_state(interface=interface) - - @retry(max_attempts=5, sleep_interval=5, expected_result="DOWN") - def check_state_down(self, interface: str) -> str: - return self.check_state(interface=interface) diff --git a/src/frostfs_testlib/load/__init__.py b/src/frostfs_testlib/load/__init__.py deleted file mode 100644 index 8477ee4..0000000 --- a/src/frostfs_testlib/load/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from frostfs_testlib.load.interfaces.loader import Loader -from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner -from frostfs_testlib.load.load_config import ( - EndpointSelectionStrategy, - K6ProcessAllocationStrategy, - LoadParams, - LoadScenario, - LoadType, - NodesSelectionStrategy, - Preset, - ReadFrom, -) -from frostfs_testlib.load.load_report import LoadReport -from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader -from frostfs_testlib.load.runners import DefaultRunner, LocalRunner, S3LocalRunner diff --git a/src/frostfs_testlib/load/interfaces/loader.py b/src/frostfs_testlib/load/interfaces/loader.py deleted file mode 100644 index 2c818d9..0000000 --- a/src/frostfs_testlib/load/interfaces/loader.py +++ /dev/null @@ -1,14 +0,0 @@ -from abc import ABC, abstractmethod - -from frostfs_testlib.shell.interfaces import Shell - - -class Loader(ABC): - @abstractmethod - 
def get_shell(self) -> Shell: - """Get shell for the loader""" - - @property - @abstractmethod - def ip(self): - """Get address of the loader""" diff --git a/src/frostfs_testlib/load/interfaces/scenario_runner.py b/src/frostfs_testlib/load/interfaces/scenario_runner.py deleted file mode 100644 index c0062a9..0000000 --- a/src/frostfs_testlib/load/interfaces/scenario_runner.py +++ /dev/null @@ -1,55 +0,0 @@ -from abc import ABC, abstractmethod - -from frostfs_testlib.load.interfaces.loader import Loader -from frostfs_testlib.load.k6 import K6 -from frostfs_testlib.load.load_config import LoadParams -from frostfs_testlib.storage.cluster import ClusterNode - - -class ScenarioRunner(ABC): - @abstractmethod - def prepare( - self, - load_params: LoadParams, - cluster_nodes: list[ClusterNode], - nodes_under_load: list[ClusterNode], - k6_dir: str, - ): - """Preparation steps before running the load""" - - @abstractmethod - def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): - """Init K6 instances""" - - @abstractmethod - def get_k6_instances(self) -> list[K6]: - """Get K6 instances""" - - @abstractmethod - def start(self): - """Start K6 instances""" - - @abstractmethod - def stop(self): - """Stop K6 instances""" - - @abstractmethod - def preset(self): - """Run preset for load""" - - @property - @abstractmethod - def is_running(self) -> bool: - """Returns True if load is running at the moment""" - - @abstractmethod - def wait_until_finish(self, soft_timeout: int = 0): - """Wait until load is finished""" - - @abstractmethod - def get_results(self) -> dict: - """Get results from K6 run""" - - @abstractmethod - def get_loaders(self) -> list[Loader]: - """Return loaders""" diff --git a/src/frostfs_testlib/load/interfaces/summarized.py b/src/frostfs_testlib/load/interfaces/summarized.py deleted file mode 100644 index 4be33ef..0000000 --- a/src/frostfs_testlib/load/interfaces/summarized.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass, field - -from frostfs_testlib.load.load_config import LoadParams, LoadScenario -from frostfs_testlib.load.load_metrics import get_metrics_object - - -@dataclass -class SummarizedErorrs: - total: int = field(default_factory=int) - percent: float = field(default_factory=float) - threshold: float = field(default_factory=float) - by_node: dict[str, int] = field(default_factory=dict) - - def calc_stats(self, operations): - self.total += sum(self.by_node.values()) - - if not operations: - return - - self.percent = self.total / operations * 100 - - -@dataclass -class SummarizedLatencies: - avg: float = field(default_factory=float) - min: float = field(default_factory=float) - max: float = field(default_factory=float) - by_node: dict[str, dict[str, int]] = field(default_factory=dict) - - def calc_stats(self): - if not self.by_node: - return - - avgs = [lt["avg"] for lt in self.by_node.values()] - self.avg = sum(avgs) / len(avgs) - - minimal = [lt["min"] for lt in self.by_node.values()] - self.min = min(minimal) - - maximum = [lt["max"] for lt in self.by_node.values()] - self.max = max(maximum) - - -@dataclass -class SummarizedStats: - threads: int = field(default_factory=int) - requested_rate: int = field(default_factory=int) - operations: int = field(default_factory=int) - rate: float = field(default_factory=float) - throughput: float = field(default_factory=float) - latencies: SummarizedLatencies = field(default_factory=SummarizedLatencies) - errors: SummarizedErorrs = field(default_factory=SummarizedErorrs) - total_bytes: 
int = field(default_factory=int) - passed: bool = True - - def calc_stats(self): - self.errors.calc_stats(self.operations) - self.latencies.calc_stats() - self.passed = self.errors.percent <= self.errors.threshold - - @staticmethod - def collect(load_params: LoadParams, load_summaries: dict) -> dict[str, "SummarizedStats"]: - if load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]: - delete_vus = max(load_params.preallocated_deleters or 0, load_params.max_deleters or 0) - write_vus = max(load_params.preallocated_writers or 0, load_params.max_writers or 0) - read_vus = max(load_params.preallocated_readers or 0, load_params.max_readers or 0) - else: - write_vus = load_params.writers - read_vus = load_params.readers - delete_vus = load_params.deleters - - summarized = { - "Write": SummarizedStats(threads=write_vus, requested_rate=load_params.write_rate), - "Read": SummarizedStats(threads=read_vus, requested_rate=load_params.read_rate), - "Delete": SummarizedStats(threads=delete_vus, requested_rate=load_params.delete_rate), - } - - for node_key, load_summary in load_summaries.items(): - metrics = get_metrics_object(load_params.scenario, load_summary) - for operation in metrics.operations: - target = summarized[operation._NAME] - if not operation.total_iterations: - continue - target.operations += operation.total_iterations - target.rate += operation.rate - target.latencies.by_node[node_key] = operation.latency - target.throughput += operation.throughput - target.errors.threshold = load_params.error_threshold - target.total_bytes += operation.total_bytes - if operation.failed_iterations: - target.errors.by_node[node_key] = operation.failed_iterations - - for operation in summarized.values(): - operation.calc_stats() - - return summarized diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index 3e62a16..2fa2c00 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -1,27 +1,27 @@ import json import logging -import math import os -from dataclasses import dataclass -from datetime import datetime -from threading import Event +from dataclasses import dataclass, fields from time import sleep from typing import Any -from urllib.parse import urlparse -from frostfs_testlib import reporter -from frostfs_testlib.credentials.interfaces import User -from frostfs_testlib.load.interfaces.loader import Loader -from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType +from frostfs_testlib.load.load_config import ( + K6ProcessAllocationStrategy, + LoadParams, + LoadScenario, + LoadType, +) from frostfs_testlib.processes.remote_process import RemoteProcess -from frostfs_testlib.resources.common import STORAGE_USER_NAME -from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, LOAD_NODE_SSH_USER from frostfs_testlib.shell import Shell +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.test_control import wait_for_success EXIT_RESULT_CODE = 0 logger = logging.getLogger("NeoLogger") +reporter = get_reporter() @dataclass @@ -42,193 +42,141 @@ class K6: endpoints: list[str], k6_dir: str, shell: Shell, - loader: Loader, - user: User, + load_node: str, + wallet: WalletInfo, ): if load_params.scenario is None: raise RuntimeError("Scenario should not be none") - self.load_params = load_params + 
self.load_params: LoadParams = load_params self.endpoints = endpoints - self.loader = loader - self.shell = shell - self.user = user - self.preset_output: str = "" + self.load_node: str = load_node + self.shell: Shell = shell + self.wallet = wallet + self.scenario: LoadScenario = load_params.scenario self.summary_json: str = os.path.join( self.load_params.working_dir, - f"{self.load_params.load_id}_{self.load_params.scenario.value}_summary.json", + f"{self.load_params.load_id}_{self.scenario.value}_summary.json", ) self._k6_dir: str = k6_dir - command = ( - f"{self._generate_env_variables()}{self._k6_dir}/k6 run {self._generate_k6_variables()} " - f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js" - ) - remote_user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None - process_id = self.load_params.load_id if self.load_params.scenario != LoadScenario.VERIFY else f"{self.load_params.load_id}_verify" - self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, remote_user, process_id) - - def _get_fill_percents(self): - fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs | grep data").stdout.split("\n") - return [line.split() for line in fill_percents][:-1] - - def check_fill_percent(self): - fill_percents = self._get_fill_percents() - percent_mean = 0 - for line in fill_percents: - percent_mean += float(line[1].split("%")[0]) - percent_mean = percent_mean / len(fill_percents) - logger.info(f"{self.loader.ip} mean fill percent is {percent_mean}") - return percent_mean >= self.load_params.fill_percent - @property def process_dir(self) -> str: return self._k6_process.process_dir + @reporter.step_deco("Preset containers and objects") def preset(self) -> str: - with reporter.step(f"Run preset on loader {self.loader.ip} for endpoints {self.endpoints}"): - preset_grpc = f"{self._k6_dir}/scenarios/preset/preset_grpc.py" - preset_s3 = f"{self._k6_dir}/scenarios/preset/preset_s3.py" - preset_map = { - LoadType.gRPC: preset_grpc, - LoadType.S3: preset_s3, - LoadType.HTTP: preset_grpc, - } + preset_grpc = f"{self._k6_dir}/scenarios/preset/preset_grpc.py" + preset_s3 = f"{self._k6_dir}/scenarios/preset/preset_s3.py" + preset_map = { + LoadType.gRPC: preset_grpc, + LoadType.S3: preset_s3, + LoadType.HTTP: preset_grpc, + } - base_args = { - preset_grpc: [ - preset_grpc, - f"--endpoint {','.join(self.endpoints)}", - f"--wallet {self.user.wallet.path} ", - f"--config {self.user.wallet.config_path} ", - ], - preset_s3: [ - preset_s3, - f"--endpoint {','.join(self.endpoints)}", - ], - } + base_args = { + preset_grpc: [ + preset_grpc, + f"--endpoint {self.endpoints[0]}", + f"--wallet {self.wallet.path} ", + f"--config {self.wallet.config_path} ", + ], + preset_s3: [ + preset_s3, + f"--endpoint {self.endpoints[0]}", + ], + } - preset_scenario = preset_map[self.load_params.load_type] - command_args = base_args[preset_scenario].copy() + preset_scenario = preset_map[self.load_params.load_type] + command_args = base_args[preset_scenario].copy() - command_args += self.load_params.get_preset_arguments() + command_args += [ + f"--{field.metadata['preset_argument']} '{getattr(self.load_params, field.name)}'" + for field in fields(self.load_params) + if field.metadata + and self.scenario in field.metadata["applicable_scenarios"] + and field.metadata["preset_argument"] + and getattr(self.load_params, field.name) is not None + ] - command = " ".join(command_args) - result = self.shell.exec(command) + if 
self.load_params.preset: + command_args += [ + f"--{field.metadata['preset_argument']} '{getattr(self.load_params.preset, field.name)}'" + for field in fields(self.load_params.preset) + if field.metadata + and self.scenario in field.metadata["applicable_scenarios"] + and field.metadata["preset_argument"] + and getattr(self.load_params.preset, field.name) is not None + ] - assert result.return_code == EXIT_RESULT_CODE, f"Return code of preset is not zero: {result.stdout}" + command = " ".join(command_args) + result = self.shell.exec(command) - self.preset_output = result.stdout.strip("\n") - return self.preset_output + assert ( + result.return_code == EXIT_RESULT_CODE + ), f"Return code of preset is not zero: {result.stdout}" + return result.stdout.strip("\n") - @reporter.step("Generate K6 variables") - def _generate_k6_variables(self) -> str: - env_vars = self.load_params.get_k6_vars() + @reporter.step_deco("Generate K6 command") + def _generate_env_variables(self) -> str: + env_vars = { + field.metadata["env_variable"]: getattr(self.load_params, field.name) + for field in fields(self.load_params) + if field.metadata + and self.scenario in field.metadata["applicable_scenarios"] + and field.metadata["env_variable"] + and getattr(self.load_params, field.name) is not None + } + + if self.load_params.preset: + env_vars.update( + { + field.metadata["env_variable"]: getattr(self.load_params.preset, field.name) + for field in fields(self.load_params.preset) + if field.metadata + and self.scenario in field.metadata["applicable_scenarios"] + and field.metadata["env_variable"] + and getattr(self.load_params.preset, field.name) is not None + } + ) env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints) env_vars["SUMMARY_JSON"] = self.summary_json - reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables") - return " ".join([f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None]) - - @reporter.step("Generate env variables") - def _generate_env_variables(self) -> str: - env_vars = self.load_params.get_env_vars() - if not env_vars: - return "" - reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "ENV variables") - return " ".join([f"{param}='{value}'" for param, value in env_vars.items() if value is not None]) + " " - - def get_start_time(self) -> datetime: - return datetime.fromtimestamp(self._k6_process.start_time()) - - def get_end_time(self) -> datetime: - return datetime.fromtimestamp(self._k6_process.end_time()) + reporter.attach( + "\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables" + ) + return " ".join( + [f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None] + ) + @reporter.step_deco("Start K6 on initiator") def start(self) -> None: - with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"): - self._k6_process.start() - - def wait_until_finished(self, event: Event, soft_timeout: int = 0) -> None: - with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"): - if self.load_params.scenario == LoadScenario.VERIFY: - timeout = self.load_params.verify_time or 0 - else: - timeout = self.load_params.load_time or 0 - - start_time = int(self.get_start_time().timestamp()) - - current_time = int(datetime.utcnow().timestamp()) - working_time = current_time - start_time - remaining_time = timeout - working_time - 
- setup_teardown_time = ( - int(K6_TEARDOWN_PERIOD) + self.load_params.get_init_time() + int(self.load_params.setup_timeout.replace("s", "").strip()) - ) - remaining_time_including_setup_and_teardown = remaining_time + setup_teardown_time - timeout = remaining_time_including_setup_and_teardown - - if soft_timeout: - timeout = min(timeout, soft_timeout) - - original_timeout = timeout - - timeouts = { - "K6 start time": start_time, - "Current time": current_time, - "K6 working time": working_time, - "Remaining time for load": remaining_time, - "Setup and teardown": setup_teardown_time, - "Remaining time including setup/teardown": remaining_time_including_setup_and_teardown, - "Soft timeout": soft_timeout, - "Selected timeout": original_timeout, - } - - reporter.attach("\n".join([f"{k}: {v}" for k, v in timeouts.items()]), "timeouts.txt") - - min_wait_interval = 10 - wait_interval = min_wait_interval - if self._k6_process is None: - assert "No k6 instances were executed" - - while timeout > 0: - if not self.load_params.fill_percent is None: - with reporter.step(f"Check the percentage of filling of all data disks on the node"): - if self.check_fill_percent(): - logger.info(f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%") - event.set() - self.stop() - return - - if event.is_set(): - self.stop() - return - - if not self._k6_process.running(): - return - - remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else "" - remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else "" - logger.info( - f"K6 is running. Remaining time {remaining_time_hours}{remaining_time_minutes}{timeout%60}s. Next check after {wait_interval} seconds..." - ) - sleep(wait_interval) - timeout -= min(timeout, wait_interval) - wait_interval = max( - min(timeout, int(math.log2(timeout + 1)) * 15) - min_wait_interval, - min_wait_interval, - ) + command = ( + f"{self._k6_dir}/k6 run {self._generate_env_variables()} " + f"{self._k6_dir}/scenarios/{self.scenario.value}.js" + ) + self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir) + @reporter.step_deco("Wait until K6 is finished") + def wait_until_finished(self, timeout: int = 0, k6_should_be_running: bool = False) -> None: + wait_interval = 10 + if self._k6_process is None: + assert "No k6 instances were executed" + if k6_should_be_running: + assert self._k6_process.running(), "k6 should be running." + while timeout > 0: if not self._k6_process.running(): return - - self.stop() - if not soft_timeout: - raise TimeoutError(f"Expected K6 to finish after {original_timeout} sec.") + logger.info(f"K6 is running. 
Waiting {wait_interval} seconds...") + sleep(wait_interval) + timeout -= wait_interval + self.stop() + raise TimeoutError(f"Expected K6 finished in {timeout} sec.") def get_results(self) -> Any: - with reporter.step(f"Get load results from loader {self.loader.ip} on endpoints {self.endpoints}"): + with reporter.step(f"K6 results from {self.load_node}"): self.__log_output() if not self.summary_json: @@ -236,30 +184,33 @@ class K6: summary_text = self.shell.exec(f"cat {self.summary_json}").stdout summary_json = json.loads(summary_text) - endpoint = urlparse(self.endpoints[0]).netloc or self.endpoints[0] + allure_filenames = { - K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.loader.ip}_{self.load_params.scenario.value}_summary.json", - K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.loader.ip}_{self.load_params.scenario.value}_{endpoint}_summary.json", + K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.load_node}_{self.scenario.value}_summary.json", + K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.load_node}_{self.scenario.value}_{self.endpoints[0]}_summary.json", } allure_filename = allure_filenames[self.load_params.k6_process_allocation_strategy] reporter.attach(summary_text, allure_filename) return summary_json + @reporter.step_deco("Stop K6") def stop(self) -> None: - with reporter.step(f"Stop load from loader {self.loader.ip} on endpoints {self.endpoints}"): - if self.is_running(): - self._k6_process.stop() + if self.is_running: + self._k6_process.stop() - self._wait_until_process_end() + self._wait_until_process_end() + @property def is_running(self) -> bool: if self._k6_process: return self._k6_process.running() return False - @reporter.step("Wait until K6 process end") - @wait_for_success(K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout") + @reporter.step_deco("Wait until process end") + @wait_for_success( + K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout" + ) def _wait_until_process_end(self): return self._k6_process.running() diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 3830203..4e67321 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -1,38 +1,7 @@ -import math import os -from dataclasses import dataclass, field, fields, is_dataclass +from dataclasses import dataclass, field from enum import Enum -from types import MappingProxyType -from typing import Any, Callable, Optional, get_args - -from frostfs_testlib.utils.converting_utils import calc_unit - - -def convert_time_to_seconds(time: int | str | None) -> int: - if time is None: - return None - if str(time).isdigit(): - seconds = int(time) - else: - days, hours, minutes = 0, 0, 0 - if "d" in time: - days, time = time.split("d") - if "h" in time: - hours, time = time.split("h") - if "min" in time: - minutes = time.replace("min", "") - seconds = int(days) * 86400 + int(hours) * 3600 + int(minutes) * 60 - return seconds - - -def force_list(input: str | list[str]): - if input is None: - return None - - if isinstance(input, list): - return list(map(str.strip, input)) - - return [input.strip()] +from typing import Optional class LoadType(Enum): @@ -46,17 +15,8 @@ class LoadScenario(Enum): gRPC_CAR = "grpc_car" S3 = "s3" S3_CAR = "s3_car" - S3_MULTIPART = "s3_multipart" - S3_LOCAL = "s3local" HTTP = "http" VERIFY = "verify" - LOCAL = "local" - - -class ReadFrom(Enum): - REGISTRY = "registry" - PRESET = "preset" - MANUAL = "manual" all_load_scenarios = [ 
@@ -65,57 +25,29 @@ all_load_scenarios = [ LoadScenario.HTTP, LoadScenario.S3_CAR, LoadScenario.gRPC_CAR, - LoadScenario.LOCAL, - LoadScenario.S3_MULTIPART, - LoadScenario.S3_LOCAL, ] all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY] -constant_vus_scenarios = [ - LoadScenario.gRPC, - LoadScenario.S3, - LoadScenario.HTTP, - LoadScenario.LOCAL, - LoadScenario.S3_MULTIPART, - LoadScenario.S3_LOCAL, -] +constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP] constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR] -grpc_preset_scenarios = [ - LoadScenario.gRPC, - LoadScenario.HTTP, - LoadScenario.gRPC_CAR, - LoadScenario.LOCAL, -] -s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL] - - -@dataclass -class MetaField: - name: str - metadata: MappingProxyType - value: Any +grpc_preset_scenarios = [LoadScenario.gRPC, LoadScenario.HTTP, LoadScenario.gRPC_CAR] +s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR] def metadata_field( applicable_scenarios: list[LoadScenario], preset_param: Optional[str] = None, scenario_variable: Optional[str] = None, - string_repr: Optional[bool] = True, distributed: Optional[bool] = False, - formatter: Optional[Callable] = None, - env_variable: Optional[str] = None, ): return field( default=None, metadata={ "applicable_scenarios": applicable_scenarios, "preset_argument": preset_param, - "scenario_variable": scenario_variable, - "string_repr": string_repr, + "env_variable": scenario_variable, "distributed": distributed, - "formatter": formatter, - "env_variable": env_variable, }, ) @@ -129,8 +61,6 @@ class NodesSelectionStrategy(Enum): ALL_EXCEPT_UNDER_TEST = "ALL_EXCEPT_UNDER_TEST" # Select ONE random node except under test (useful for failover). RANDOM_SINGLE_EXCEPT_UNDER_TEST = "RANDOM_SINGLE_EXCEPT_UNDER_TEST" - # Select node under test - NODE_UNDER_TEST = "NODE_UNDER_TEST" class EndpointSelectionStrategy(Enum): @@ -152,77 +82,33 @@ class K6ProcessAllocationStrategy(Enum): PER_ENDPOINT = "PER_ENDPOINT" -class MetaConfig: - def _get_field_formatter(self, field_name: str) -> Callable | None: - data_fields = fields(self) - formatters = [ - field.metadata["formatter"] - for field in data_fields - if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None - ] - if formatters: - return formatters[0] - - return None - - def __setattr__(self, field_name, value): - formatter = self._get_field_formatter(field_name) - if formatter: - value = formatter(value) - - super().__setattr__(field_name, value) - - @dataclass -class Preset(MetaConfig): +class Preset: # ------ COMMON ------ # Amount of objects which should be created - objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None, False) + objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None) # Preset json. Filled automatically. - pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False) + pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON") # Workers count for preset - workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False) - # TODO: Deprecated. 
Acl for container/buckets - acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False) - # APE rule for containers instead of deprecated ACL - rule: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "rule", None, False, formatter=force_list) + workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None) # ------ GRPC ------ # Amount of containers which should be created - containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None, False) + containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None) # Container placement policy for containers for gRPC - container_placement_policy: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "policy", None, False, formatter=force_list) - # Number of retries for creation of container - container_creation_retry: Optional[int] = metadata_field(grpc_preset_scenarios, "retry", None, False) + container_placement_policy: Optional[str] = metadata_field( + grpc_preset_scenarios, "policy", None + ) # ------ S3 ------ # Amount of buckets which should be created - buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None, False) + buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None) # S3 region (AKA placement policy for S3 buckets) - s3_location: Optional[list[str]] = metadata_field(s3_preset_scenarios, "location", None, False, formatter=force_list) - - # Delay between containers creation and object upload for preset - object_upload_delay: Optional[int] = metadata_field(all_load_scenarios, "sleep", None, False) - - # Flag to control preset erorrs - ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False) - - # Flag to ensure created containers store data on local endpoints - local: Optional[bool] = metadata_field(grpc_preset_scenarios, "local", None, False) + s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None) @dataclass -class PrometheusParams(MetaConfig): - # Prometheus server URL - server_url: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False) - # Prometheus trend stats - trend_stats: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_TREND_STATS", string_repr=False) - # Additional tags - metrics_tags: Optional[str] = metadata_field(all_load_scenarios, None, "METRIC_TAGS", False) - - -@dataclass -class LoadParams(MetaConfig): +class LoadParams: # ------- CONTROL PARAMS ------- # Load type can be gRPC, HTTP, S3. load_type: LoadType @@ -239,253 +125,90 @@ class LoadParams(MetaConfig): verify: Optional[bool] = None # Just id for load so distinct it between runs. Filled automatically. 
load_id: Optional[str] = None - # Acceptable number of load errors in % - # 100 means 100% errors allowed - # 1.5 means 1.5% errors allowed - # 0 means no errors allowed - error_threshold: Optional[float] = None # Working directory working_dir: Optional[str] = None # Preset for the k6 run preset: Optional[Preset] = None - # K6 download url - k6_url: Optional[str] = None - # Requests module url - requests_module_url: Optional[str] = None - # aws cli download url - awscli_url: Optional[str] = None - # No ssl verification flag - no_verify_ssl: Optional[bool] = metadata_field( - [ - LoadScenario.S3, - LoadScenario.S3_CAR, - LoadScenario.S3_MULTIPART, - LoadScenario.S3_LOCAL, - LoadScenario.VERIFY, - LoadScenario.HTTP, - ], - "no-verify-ssl", - "NO_VERIFY_SSL", - False, - ) - # Percentage of filling of all data disks on all nodes - fill_percent: Optional[float] = None - # if specified, max payload size in GB of the storage engine. If the storage engine is already full, no new objects will be saved. - max_total_size_gb: Optional[float] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "MAX_TOTAL_SIZE_GB") - # if set, the payload is generated on the fly and is not read into memory fully. - streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False) - # Output format - output: Optional[str] = metadata_field(all_load_scenarios, None, "K6_OUT", False) - # Prometheus params - prometheus: Optional[PrometheusParams] = None # ------- COMMON SCENARIO PARAMS ------- # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. - load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds) + load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION") # Object size in KB for load and preset. - object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False) - # For read operations, controls from which set get objects to read - read_from: Optional[ReadFrom] = None - # For read operations done from REGISTRY, controls delay which object should live before it will be used for read operation - read_age: Optional[int] = metadata_field(all_load_scenarios, None, "READ_AGE", False) + object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE") # Output registry K6 file. Filled automatically. - registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False) - # In case if we want to use custom registry file left from another load run - custom_registry: Optional[str] = None - # In case if we want to use custom registry file left from another load run - force_fresh_registry: Optional[bool] = None + registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE") # Specifies the minimum duration of every single execution (i.e. iteration). # Any iterations that are shorter than this value will cause that VU to # sleep for the remainder of the time until the specified minimum duration is reached. - min_iteration_duration: Optional[str] = metadata_field(all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False) - # Prepare/cut objects locally on client before sending - prepare_locally: Optional[bool] = metadata_field([LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False) + min_iteration_duration: Optional[str] = metadata_field( + all_load_scenarios, None, "K6_MIN_ITERATION_DURATION" + ) # Specifies K6 setupTimeout time. 
Currently hardcoded in xk6 as 5 seconds for all scenarios # https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout - setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False) - - # Delay for read operations in case if we read from registry - read_age: Optional[int] = metadata_field(all_load_scenarios, None, "READ_AGE", None, False) - - # Initialization time for each VU for k6 load - vu_init_time: Optional[float] = None + setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT") # ------- CONSTANT VUS SCENARIO PARAMS ------- # Amount of Writers VU. - writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True, True) + writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True) # Amount of Readers VU. - readers: Optional[int] = metadata_field(constant_vus_scenarios, None, "READERS", True, True) + readers: Optional[int] = metadata_field(constant_vus_scenarios, None, "READERS", True) # Amount of Deleters VU. - deleters: Optional[int] = metadata_field(constant_vus_scenarios, None, "DELETERS", True, True) + deleters: Optional[int] = metadata_field(constant_vus_scenarios, None, "DELETERS", True) # ------- CONSTANT ARRIVAL RATE SCENARIO PARAMS ------- # Number of iterations to start during each timeUnit period for write. - write_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "WRITE_RATE", True, True) + write_rate: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "WRITE_RATE", True + ) # Number of iterations to start during each timeUnit period for read. - read_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "READ_RATE", True, True) + read_rate: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "READ_RATE", True + ) # Number of iterations to start during each timeUnit period for delete. - delete_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True) + delete_rate: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "DELETE_RATE", True + ) # Amount of preAllocatedVUs for write operations. - preallocated_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True) + preallocated_writers: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True + ) # Amount of maxVUs for write operations. - max_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True) + max_writers: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "MAX_WRITERS", True + ) # Amount of preAllocatedVUs for read operations. - preallocated_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True) + preallocated_readers: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True + ) # Amount of maxVUs for read operations. - max_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_READERS", False, True) + max_readers: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "MAX_READERS", True + ) # Amount of preAllocatedVUs for read operations. 
- preallocated_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True) + preallocated_deleters: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True + ) # Amount of maxVUs for delete operations. - max_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True) - - # Multipart - # Number of parts to upload in parallel - writers_multipart: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True) - # part size must be greater than (5 MB) - write_object_part_size: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False) + max_deleters: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "MAX_DELETERS", True + ) # Period of time to apply the rate value. - time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT", False) + time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT") # ------- VERIFY SCENARIO PARAMS ------- # Maximum verification time for k6 to verify objects. Default is BACKGROUND_LOAD_MAX_VERIFY_TIME (3600). - verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT", False) + verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT") # Amount of Verification VU. - verify_clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS", True, False) - - # ------- LOCAL SCENARIO PARAMS ------- - # Config file location (filled automatically) - config_file: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_FILE", False) - # Config directory location (filled automatically) - config_dir: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False) + verify_clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS", True) def set_id(self, load_id): self.load_id = load_id - - if self.read_from == ReadFrom.REGISTRY: - self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt") - - # For now it's okay to have it this way - if self.custom_registry is not None: - self.registry_file = self.custom_registry - - if self.read_from == ReadFrom.PRESET: - self.registry_file = None - + self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt") if self.preset: self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json") - - def get_k6_vars(self): - env_vars = { - meta_field.metadata["scenario_variable"]: meta_field.value - for meta_field in self._get_meta_fields(self) - if self.scenario in meta_field.metadata["applicable_scenarios"] - and meta_field.metadata["scenario_variable"] - and meta_field.value is not None - } - - return env_vars - - def get_env_vars(self): - env_vars = { - meta_field.metadata["env_variable"]: meta_field.value - for meta_field in self._get_meta_fields(self) - if self.scenario in meta_field.metadata["applicable_scenarios"] - and meta_field.metadata["env_variable"] - and meta_field.value is not None - } - - return env_vars - - def __post_init__(self): - default_scenario_map = { - LoadType.gRPC: LoadScenario.gRPC, - LoadType.HTTP: LoadScenario.HTTP, - LoadType.S3: LoadScenario.S3, - } - - if self.scenario is None: - self.scenario = default_scenario_map[self.load_type] - - def get_preset_arguments(self): - command_args = [ - 
self._get_preset_argument(meta_field) - for meta_field in self._get_meta_fields(self) - if self.scenario in meta_field.metadata["applicable_scenarios"] - and meta_field.metadata["preset_argument"] - and meta_field.value is not None - and self._get_preset_argument(meta_field) - ] - - return command_args - - def get_init_time(self) -> int: - return math.ceil(self._get_total_vus() * self.vu_init_time) - - def _get_total_vus(self) -> int: - vu_fields = ["writers", "preallocated_writers", "readers", "preallocated_readers"] - data_fields = [getattr(self, field.name) or 0 for field in fields(self) if field.name in vu_fields] - return sum(data_fields) - - def _get_applicable_fields(self): - applicable_fields = [ - meta_field - for meta_field in self._get_meta_fields(self) - if self.scenario in meta_field.metadata["applicable_scenarios"] and meta_field.value - ] - - return applicable_fields - - @staticmethod - def _get_preset_argument(meta_field: MetaField) -> str: - if isinstance(meta_field.value, bool): - # For preset calls, bool values are passed with just -- if the value is True - return f"--{meta_field.metadata['preset_argument']}" if meta_field.value else "" - - if isinstance(meta_field.value, list): - return ( - " ".join(f"--{meta_field.metadata['preset_argument']} '{value}'" for value in meta_field.value) if meta_field.value else "" - ) - - return f"--{meta_field.metadata['preset_argument']} '{meta_field.value}'" - - @staticmethod - def _get_meta_fields(instance) -> list[MetaField]: - data_fields = fields(instance) - - fields_with_data = [ - MetaField(field.name, field.metadata, getattr(instance, field.name)) - for field in data_fields - if field.metadata and getattr(instance, field.name) is not None - ] - - for field in data_fields: - actual_field_type = get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type) - if is_dataclass(actual_field_type) and getattr(instance, field.name): - fields_with_data += LoadParams._get_meta_fields(getattr(instance, field.name)) - - return fields_with_data or [] - - def __str__(self) -> str: - load_type_str = self.scenario.value if self.scenario else self.load_type.value - # TODO: migrate load_params defaults to testlib - if self.object_size is not None: - size, unit = calc_unit(self.object_size, 1) - static_params = [f"{load_type_str} {size:.4g} {unit}"] - else: - static_params = [f"{load_type_str}"] - - dynamic_params = [ - f"{meta_field.name}={meta_field.value}" for meta_field in self._get_applicable_fields() if meta_field.metadata["string_repr"] - ] - params = ", ".join(static_params + dynamic_params) - - return params - - def __repr__(self) -> str: - return self.__str__() diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py index 035ce8b..50d7b38 100644 --- a/src/frostfs_testlib/load/load_metrics.py +++ b/src/frostfs_testlib/load/load_metrics.py @@ -1,54 +1,83 @@ from abc import ABC -from typing import Any, Optional +from typing import Any from frostfs_testlib.load.load_config import LoadScenario -class OperationMetric(ABC): - _NAME = "" - _SUCCESS = "" - _ERRORS = "" - _THROUGHPUT = "" - _LATENCY = "" +class MetricsBase(ABC): + _WRITE_SUCCESS = "" + _WRITE_ERRORS = "" + _WRITE_THROUGHPUT = "data_sent" + + _READ_SUCCESS = "" + _READ_ERRORS = "" + _READ_THROUGHPUT = "data_received" + + _DELETE_SUCCESS = "" + _DELETE_ERRORS = "" def __init__(self, summary) -> None: self.summary = summary self.metrics = summary["metrics"] @property - def total_iterations(self) -> int: - return 
self._get_metric(self._SUCCESS) + self._get_metric(self._ERRORS) + def write_total_iterations(self) -> int: + return self._get_metric(self._WRITE_SUCCESS) + self._get_metric(self._WRITE_ERRORS) @property - def success_iterations(self) -> int: - return self._get_metric(self._SUCCESS) + def write_success_iterations(self) -> int: + return self._get_metric(self._WRITE_SUCCESS) @property - def latency(self) -> dict: - return self._get_metric(self._LATENCY) + def write_rate(self) -> float: + return self._get_metric_rate(self._WRITE_SUCCESS) @property - def rate(self) -> float: - return self._get_metric_rate(self._SUCCESS) + def write_failed_iterations(self) -> int: + return self._get_metric(self._WRITE_ERRORS) @property - def failed_iterations(self) -> int: - return self._get_metric(self._ERRORS) + def write_throughput(self) -> float: + return self._get_metric_rate(self._WRITE_THROUGHPUT) @property - def throughput(self) -> float: - return self._get_metric_rate(self._THROUGHPUT) + def read_total_iterations(self) -> int: + return self._get_metric(self._READ_SUCCESS) + self._get_metric(self._READ_ERRORS) @property - def total_bytes(self) -> float: - return self._get_metric(self._THROUGHPUT) + def read_success_iterations(self) -> int: + return self._get_metric(self._READ_SUCCESS) + + @property + def read_rate(self) -> int: + return self._get_metric_rate(self._READ_SUCCESS) + + @property + def read_failed_iterations(self) -> int: + return self._get_metric(self._READ_ERRORS) + + @property + def read_throughput(self) -> float: + return self._get_metric_rate(self._READ_THROUGHPUT) + + @property + def delete_total_iterations(self) -> int: + return self._get_metric(self._DELETE_SUCCESS) + self._get_metric(self._DELETE_ERRORS) + + @property + def delete_success_iterations(self) -> int: + return self._get_metric(self._DELETE_SUCCESS) + + @property + def delete_failed_iterations(self) -> int: + return self._get_metric(self._DELETE_ERRORS) + + @property + def delete_rate(self) -> int: + return self._get_metric_rate(self._DELETE_SUCCESS) def _get_metric(self, metric: str) -> int: - metrics_method_map = { - "counter": self._get_counter_metric, - "gauge": self._get_gauge_metric, - "trend": self._get_trend_metrics, - } + metrics_method_map = {"counter": self._get_counter_metric, "gauge": self._get_gauge_metric} if metric not in self.metrics: return 0 @@ -56,7 +85,9 @@ class OperationMetric(ABC): metric = self.metrics[metric] metric_type = metric["type"] if metric_type not in metrics_method_map: - raise Exception(f"Unsupported metric type: {metric_type}, supported: {metrics_method_map.keys()}") + raise Exception( + f"Unsupported metric type: {metric_type}, supported: {metrics_method_map.keys()}" + ) return metrics_method_map[metric_type](metric) @@ -69,7 +100,9 @@ class OperationMetric(ABC): metric = self.metrics[metric] metric_type = metric["type"] if metric_type not in metrics_method_map: - raise Exception(f"Unsupported rate metric type: {metric_type}, supported: {metrics_method_map.keys()}") + raise Exception( + f"Unsupported rate metric type: {metric_type}, supported: {metrics_method_map.keys()}" + ) return metrics_method_map[metric_type](metric) @@ -82,149 +115,38 @@ class OperationMetric(ABC): def _get_gauge_metric(self, metric: str) -> int: return metric["values"]["value"] - def _get_trend_metrics(self, metric: str) -> int: - return metric["values"] - - -class WriteOperationMetric(OperationMetric): - _NAME = "Write" - _SUCCESS = "" - _ERRORS = "" - _THROUGHPUT = "data_sent" - _LATENCY = "" - - -class 
ReadOperationMetric(OperationMetric): - _NAME = "Read" - _SUCCESS = "" - _ERRORS = "" - _THROUGHPUT = "data_received" - _LATENCY = "" - - -class DeleteOperationMetric(OperationMetric): - _NAME = "Delete" - _SUCCESS = "" - _ERRORS = "" - _THROUGHPUT = "" - _LATENCY = "" - - -class GrpcWriteOperationMetric(WriteOperationMetric): - _SUCCESS = "frostfs_obj_put_success" - _ERRORS = "frostfs_obj_put_fails" - _LATENCY = "frostfs_obj_put_duration" - - -class GrpcReadOperationMetric(ReadOperationMetric): - _SUCCESS = "frostfs_obj_get_success" - _ERRORS = "frostfs_obj_get_fails" - _LATENCY = "frostfs_obj_get_duration" - - -class GrpcDeleteOperationMetric(DeleteOperationMetric): - _SUCCESS = "frostfs_obj_delete_success" - _ERRORS = "frostfs_obj_delete_fails" - _LATENCY = "frostfs_obj_delete_duration" - - -class S3WriteOperationMetric(WriteOperationMetric): - _SUCCESS = "aws_obj_put_success" - _ERRORS = "aws_obj_put_fails" - _LATENCY = "aws_obj_put_duration" - - -class S3ReadOperationMetric(ReadOperationMetric): - _SUCCESS = "aws_obj_get_success" - _ERRORS = "aws_obj_get_fails" - _LATENCY = "aws_obj_get_duration" - - -class S3DeleteOperationMetric(DeleteOperationMetric): - _SUCCESS = "aws_obj_delete_success" - _ERRORS = "aws_obj_delete_fails" - _LATENCY = "aws_obj_delete_duration" - - -class S3LocalWriteOperationMetric(WriteOperationMetric): - _SUCCESS = "s3local_obj_put_success" - _ERRORS = "s3local_obj_put_fails" - _LATENCY = "s3local_obj_put_duration" - - -class S3LocalReadOperationMetric(ReadOperationMetric): - _SUCCESS = "s3local_obj_get_success" - _ERRORS = "s3local_obj_get_fails" - _LATENCY = "s3local_obj_get_duration" - - -class LocalWriteOperationMetric(WriteOperationMetric): - _SUCCESS = "local_obj_put_success" - _ERRORS = "local_obj_put_fails" - _LATENCY = "local_obj_put_duration" - - -class LocalReadOperationMetric(ReadOperationMetric): - _SUCCESS = "local_obj_get_success" - _ERRORS = "local_obj_get_fails" - - -class LocalDeleteOperationMetric(DeleteOperationMetric): - _SUCCESS = "local_obj_delete_success" - _ERRORS = "local_obj_delete_fails" - - -class VerifyReadOperationMetric(ReadOperationMetric): - _SUCCESS = "verified_obj" - _ERRORS = "invalid_obj" - - -class MetricsBase(ABC): - def __init__(self) -> None: - self.write: Optional[WriteOperationMetric] = None - self.read: Optional[ReadOperationMetric] = None - self.delete: Optional[DeleteOperationMetric] = None - - @property - def operations(self) -> list[OperationMetric]: - return [metric for metric in [self.write, self.read, self.delete] if metric is not None] - class GrpcMetrics(MetricsBase): - def __init__(self, summary) -> None: - super().__init__() - self.write = GrpcWriteOperationMetric(summary) - self.read = GrpcReadOperationMetric(summary) - self.delete = GrpcDeleteOperationMetric(summary) + _WRITE_SUCCESS = "frostfs_obj_put_total" + _WRITE_ERRORS = "frostfs_obj_put_fails" + + _READ_SUCCESS = "frostfs_obj_get_total" + _READ_ERRORS = "frostfs_obj_get_fails" + + _DELETE_SUCCESS = "frostfs_obj_delete_total" + _DELETE_ERRORS = "frostfs_obj_delete_fails" class S3Metrics(MetricsBase): - def __init__(self, summary) -> None: - super().__init__() - self.write = S3WriteOperationMetric(summary) - self.read = S3ReadOperationMetric(summary) - self.delete = S3DeleteOperationMetric(summary) + _WRITE_SUCCESS = "aws_obj_put_total" + _WRITE_ERRORS = "aws_obj_put_fails" + _READ_SUCCESS = "aws_obj_get_total" + _READ_ERRORS = "aws_obj_get_fails" -class S3LocalMetrics(MetricsBase): - def __init__(self, summary) -> None: - super().__init__() - 
self.write = S3LocalWriteOperationMetric(summary) - self.read = S3LocalReadOperationMetric(summary) - - -class LocalMetrics(MetricsBase): - def __init__(self, summary) -> None: - super().__init__() - self.write = LocalWriteOperationMetric(summary) - self.read = LocalReadOperationMetric(summary) - self.delete = LocalDeleteOperationMetric(summary) + _DELETE_SUCCESS = "aws_obj_delete_total" + _DELETE_ERRORS = "aws_obj_delete_fails" class VerifyMetrics(MetricsBase): - def __init__(self, summary) -> None: - super().__init__() - self.read = VerifyReadOperationMetric(summary) + _WRITE_SUCCESS = "N/A" + _WRITE_ERRORS = "N/A" + + _READ_SUCCESS = "verified_obj" + _READ_ERRORS = "invalid_obj" + + _DELETE_SUCCESS = "N/A" + _DELETE_ERRORS = "N/A" def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> MetricsBase: @@ -234,10 +156,7 @@ def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> Metr LoadScenario.HTTP: GrpcMetrics, LoadScenario.S3: S3Metrics, LoadScenario.S3_CAR: S3Metrics, - LoadScenario.S3_MULTIPART: S3Metrics, - LoadScenario.S3_LOCAL: S3LocalMetrics, LoadScenario.VERIFY: VerifyMetrics, - LoadScenario.LOCAL: LocalMetrics, } return class_map[load_type](summary) diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index 2dfac26..5f22515 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -1,11 +1,10 @@ from datetime import datetime -from typing import Optional +from typing import Optional, Tuple import yaml -from frostfs_testlib.load.interfaces.summarized import SummarizedStats from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario -from frostfs_testlib.utils.converting_utils import calc_unit +from frostfs_testlib.load.load_metrics import get_metrics_object class LoadReport: @@ -17,15 +16,11 @@ class LoadReport: self.start_time: Optional[datetime] = None self.end_time: Optional[datetime] = None - def set_start_time(self, time: datetime = None): - if time is None: - time = datetime.utcnow() - self.start_time = time + def set_start_time(self): + self.start_time = datetime.utcnow() - def set_end_time(self, time: datetime = None): - if time is None: - time = datetime.utcnow() - self.end_time = time + def set_end_time(self): + self.end_time = datetime.utcnow() def add_summaries(self, load_summaries: dict): self.load_summaries_list.append(load_summaries) @@ -35,7 +30,6 @@ class LoadReport: def get_report_html(self): report_sections = [ - [self.load_params, self._get_load_id_section_html], [self.load_test, self._get_load_params_section_html], [self.load_summaries_list, self._get_totals_section_html], [self.end_time, self._get_test_time_html], @@ -49,8 +43,8 @@ class LoadReport: return html def _get_load_params_section_html(self) -> str: - params: str = yaml.safe_dump([self.load_test], sort_keys=False, indent=2, explicit_start=True) - params = params.replace("\n", "
").replace(" ", " ") + params: str = yaml.safe_dump(self.load_test, sort_keys=False) + params = params.replace("\n", "
") section_html = f"""

Scenario params

{params}
@@ -58,23 +52,25 @@ class LoadReport: return section_html - def _get_load_id_section_html(self) -> str: - section_html = f"""

Load ID: {self.load_params.load_id}

-
""" - - return section_html - def _get_test_time_html(self) -> str: - if not self.start_time or not self.end_time: - return "" - - html = f"""

Scenario duration

+ html = f"""

Scenario duration in UTC time (from agent)

{self.start_time} - {self.end_time}

""" return html + def _calc_unit(self, value: float, skip_units: int = 0) -> Tuple[float, str]: + units = ["B", "KiB", "MiB", "GiB", "TiB"] + + for unit in units[skip_units:]: + if value < 1024: + return value, unit + + value = value / 1024.0 + + return value, unit + def _seconds_to_formatted_duration(self, seconds: int) -> str: """Converts N number of seconds to formatted output ignoring zeroes. Examples: @@ -104,62 +100,57 @@ class LoadReport: model_map = { LoadScenario.gRPC: "closed model", LoadScenario.S3: "closed model", - LoadScenario.S3_MULTIPART: "closed model", LoadScenario.HTTP: "closed model", LoadScenario.gRPC_CAR: "open model", LoadScenario.S3_CAR: "open model", - LoadScenario.LOCAL: "local fill", - LoadScenario.S3_LOCAL: "local fill", } return model_map[self.load_params.scenario] - def _get_operations_sub_section_html(self, operation_type: str, stats: SummarizedStats): + def _get_oprations_sub_section_html( + self, + operation_type: str, + total_operations: int, + requested_rate_str: str, + vus_str: str, + total_rate: float, + throughput: float, + errors: dict[str, int], + ): throughput_html = "" - if stats.throughput > 0: - throughput, unit = calc_unit(stats.throughput) + if throughput > 0: + throughput, unit = self._calc_unit(throughput) throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec") - bytes_html = "" - if stats.total_bytes > 0: - total_bytes, total_bytes_unit = calc_unit(stats.total_bytes) - bytes_html = self._row("Total transferred", f"{total_bytes:.2f} {total_bytes_unit}") - per_node_errors_html = "" - for node_key, errors in stats.errors.by_node.items(): - if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT: - per_node_errors_html += self._row(f"At {node_key}", errors) + total_errors = 0 + if errors: + total_errors: int = 0 + for node_key, errors in errors.items(): + total_errors += errors + if ( + self.load_params.k6_process_allocation_strategy + == K6ProcessAllocationStrategy.PER_ENDPOINT + ): + per_node_errors_html += self._row(f"At {node_key}", errors) - latency_html = "" - for node_key, latencies in stats.latencies.by_node.items(): - latency_values = "N/A" - if latencies: - latency_values = "" - for param_name, param_val in latencies.items(): - latency_values += f"{param_name}={param_val:.2f}ms " - - latency_html += self._row(f"{operation_type} latency {node_key.split(':')[0]}", latency_values) - - object_size, object_size_unit = calc_unit(self.load_params.object_size, 1) + object_size, object_size_unit = self._calc_unit(self.load_params.object_size, 1) duration = self._seconds_to_formatted_duration(self.load_params.load_time) model = self._get_model_string() - requested_rate_str = f"{stats.requested_rate}op/sec" if stats.requested_rate else "" # write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s - short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {stats.threads}th {model} - {throughput:.2f}{unit}/s {stats.rate:.2f}/s" + short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {vus_str} {model} - {throughput:.2f}{unit} {total_rate:.2f}/s" html = f""" - {self._row("Total operations", stats.operations)} - {self._row("OP/sec", f"{stats.rate:.2f}")} - {bytes_html} + {self._row("Total operations", total_operations)} + {self._row("OP/sec", f"{total_rate:.2f}")} {throughput_html} - {latency_html} + {per_node_errors_html} - {self._row("Total", 
f"{stats.errors.total} ({stats.errors.percent:.2f}%)")} - {self._row("Threshold", f"{stats.errors.threshold:.2f}%")} + {self._row("Total", f"{total_errors} ({total_errors/total_operations*100.0:.2f}%)")}
{short_summary}
Metrics
Errors


""" @@ -167,12 +158,112 @@ class LoadReport: def _get_totals_section_html(self): html = "" - for i in range(len(self.load_summaries_list)): - html += f"

Load Results for load #{i+1}

" + for i, load_summaries in enumerate(self.load_summaries_list, 1): + html += f"

Load Results for load #{i}

" - summarized = SummarizedStats.collect(self.load_params, self.load_summaries_list[i]) - for operation_type, stats in summarized.items(): - if stats.operations: - html += self._get_operations_sub_section_html(operation_type, stats) + write_operations = 0 + write_op_sec = 0 + write_throughput = 0 + write_errors = {} + requested_write_rate = self.load_params.write_rate + requested_write_rate_str = ( + f"{requested_write_rate}op/sec" if requested_write_rate else "" + ) + + read_operations = 0 + read_op_sec = 0 + read_throughput = 0 + read_errors = {} + requested_read_rate = self.load_params.read_rate + requested_read_rate_str = f"{requested_read_rate}op/sec" if requested_read_rate else "" + + delete_operations = 0 + delete_op_sec = 0 + delete_errors = {} + requested_delete_rate = self.load_params.delete_rate + requested_delete_rate_str = ( + f"{requested_delete_rate}op/sec" if requested_delete_rate else "" + ) + + if self.load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]: + delete_vus = max( + self.load_params.preallocated_deleters or 0, self.load_params.max_deleters or 0 + ) + write_vus = max( + self.load_params.preallocated_writers or 0, self.load_params.max_writers or 0 + ) + read_vus = max( + self.load_params.preallocated_readers or 0, self.load_params.max_readers or 0 + ) + else: + write_vus = self.load_params.writers + read_vus = self.load_params.readers + delete_vus = self.load_params.deleters + + write_vus_str = f"{write_vus}th" + read_vus_str = f"{read_vus}th" + delete_vus_str = f"{delete_vus}th" + + write_section_required = False + read_section_required = False + delete_section_required = False + + for node_key, load_summary in load_summaries.items(): + metrics = get_metrics_object(self.load_params.scenario, load_summary) + write_operations += metrics.write_total_iterations + if write_operations: + write_section_required = True + write_op_sec += metrics.write_rate + write_throughput += metrics.write_throughput + if metrics.write_failed_iterations: + write_errors[node_key] = metrics.write_failed_iterations + + read_operations += metrics.read_total_iterations + if read_operations: + read_section_required = True + read_op_sec += metrics.read_rate + read_throughput += metrics.read_throughput + if metrics.read_failed_iterations: + read_errors[node_key] = metrics.read_failed_iterations + + delete_operations += metrics.delete_total_iterations + if delete_operations: + delete_section_required = True + delete_op_sec += metrics.delete_rate + if metrics.delete_failed_iterations: + delete_errors[node_key] = metrics.delete_failed_iterations + + if write_section_required: + html += self._get_oprations_sub_section_html( + "Write", + write_operations, + requested_write_rate_str, + write_vus_str, + write_op_sec, + write_throughput, + write_errors, + ) + + if read_section_required: + html += self._get_oprations_sub_section_html( + "Read", + read_operations, + requested_read_rate_str, + read_vus_str, + read_op_sec, + read_throughput, + read_errors, + ) + + if delete_section_required: + html += self._get_oprations_sub_section_html( + "Delete", + delete_operations, + requested_delete_rate_str, + delete_vus_str, + delete_op_sec, + 0, + delete_errors, + ) return html diff --git a/src/frostfs_testlib/load/load_steps.py b/src/frostfs_testlib/load/load_steps.py new file mode 100644 index 0000000..b55ff22 --- /dev/null +++ b/src/frostfs_testlib/load/load_steps.py @@ -0,0 +1,191 @@ +import copy +import itertools +import math +import re +from dataclasses import fields + +from 
frostfs_testlib.cli import FrostfsAuthmate +from frostfs_testlib.load.k6 import K6 +from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC +from frostfs_testlib.resources.load_params import ( + BACKGROUND_LOAD_VUS_COUNT_DIVISOR, + LOAD_NODE_SSH_USER, +) +from frostfs_testlib.shell import CommandOptions, SSHShell +from frostfs_testlib.shell.interfaces import InteractiveInput, SshCredentials +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo + +reporter = get_reporter() + +STOPPED_HOSTS = [] + + +@reporter.step_deco("Init s3 client on load nodes") +def init_s3_client( + load_nodes: list[str], + load_params: LoadParams, + k6_directory: str, + ssh_credentials: SshCredentials, + nodes_under_load: list[ClusterNode], + wallet: WalletInfo, +): + storage_node = nodes_under_load[0].service(StorageNode) + s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in nodes_under_load] + grpc_peer = storage_node.get_rpc_endpoint() + + for load_node in load_nodes: + ssh_client = _get_shell(ssh_credentials, load_node) + frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(ssh_client, FROSTFS_AUTHMATE_EXEC) + issue_secret_output = frostfs_authmate_exec.secret.issue( + wallet=wallet.path, + peer=grpc_peer, + bearer_rules=f"{k6_directory}/scenarios/files/rules.json", + gate_public_key=s3_public_keys, + container_placement_policy=load_params.preset.container_placement_policy, + container_policy=f"{k6_directory}/scenarios/files/policy.json", + wallet_password=wallet.password, + ).stdout + aws_access_key_id = str( + re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group( + "aws_access_key_id" + ) + ) + aws_secret_access_key = str( + re.search( + r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output + ).group("aws_secret_access_key") + ) + # prompt_pattern doesn't work at the moment + configure_input = [ + InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id), + InteractiveInput( + prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key + ), + InteractiveInput(prompt_pattern=r".*", input=""), + InteractiveInput(prompt_pattern=r".*", input=""), + ] + ssh_client.exec("aws configure", CommandOptions(interactive_inputs=configure_input)) + + +@reporter.step_deco("Prepare K6 instances and objects") +def prepare_k6_instances( + load_nodes: list[str], + ssh_credentials: SshCredentials, + k6_dir: str, + load_params: LoadParams, + endpoints: list[str], + loaders_wallet: WalletInfo, +) -> list[K6]: + k6_load_objects: list[K6] = [] + nodes = itertools.cycle(load_nodes) + + k6_distribution_count = { + K6ProcessAllocationStrategy.PER_LOAD_NODE: len(load_nodes), + K6ProcessAllocationStrategy.PER_ENDPOINT: len(endpoints), + } + endpoints_generators = { + K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]), + K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle( + [[endpoint] for endpoint in endpoints] + ), + } + k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy] + endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy] + + distributed_load_params_list = _get_distributed_load_params_list( + load_params, k6_processes_count + ) + + for distributed_load_params in 
distributed_load_params_list: + load_node = next(nodes) + shell = _get_shell(ssh_credentials, load_node) + # Make working_dir directory + shell.exec(f"sudo mkdir -p {distributed_load_params.working_dir}") + shell.exec(f"sudo chown {LOAD_NODE_SSH_USER} {distributed_load_params.working_dir}") + + k6_load_object = K6( + distributed_load_params, + next(endpoints_gen), + k6_dir, + shell, + load_node, + loaders_wallet, + ) + k6_load_objects.append(k6_load_object) + if load_params.preset: + k6_load_object.preset() + + return k6_load_objects + + +def _get_shell(ssh_credentials: SshCredentials, load_node: str) -> SSHShell: + ssh_client = SSHShell( + host=load_node, + login=ssh_credentials.ssh_login, + password=ssh_credentials.ssh_password, + private_key_path=ssh_credentials.ssh_key_path, + private_key_passphrase=ssh_credentials.ssh_key_passphrase, + ) + + return ssh_client + + +def _get_distributed_load_params_list( + original_load_params: LoadParams, workers_count: int +) -> list[LoadParams]: + divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR) + distributed_load_params: list[LoadParams] = [] + + for i in range(workers_count): + load_params = copy.deepcopy(original_load_params) + # Append #i here in case if multiple k6 processes goes into same load node + load_params.set_id(f"{load_params.load_id}_{i}") + distributed_load_params.append(load_params) + + load_fields = fields(original_load_params) + + for field in load_fields: + if ( + field.metadata + and original_load_params.scenario in field.metadata["applicable_scenarios"] + and field.metadata["distributed"] + and getattr(original_load_params, field.name) is not None + ): + original_value = getattr(original_load_params, field.name) + distribution = _get_distribution(math.ceil(original_value / divisor), workers_count) + for i in range(workers_count): + setattr(distributed_load_params[i], field.name, distribution[i]) + + return distributed_load_params + + +def _get_distribution(clients_count: int, workers_count: int) -> list[int]: + """ + This function will distribute evenly as possible X clients to Y workers. + For example if we have 150 readers (clients) and we want to spread it over 4 load nodes (workers) + this will return [38, 38, 37, 37]. + + Args: + clients_count: amount of things needs to be distributed. + workers_count: amount of workers. + + Returns: + list of distribution. 
+ """ + if workers_count < 1: + raise Exception("Workers cannot be less then 1") + + # Amount of guaranteed payload on one worker + clients_per_worker = clients_count // workers_count + # Remainder of clients left to be distributed + remainder = clients_count - clients_per_worker * workers_count + + distribution = [ + clients_per_worker + 1 if i < remainder else clients_per_worker + for i in range(workers_count) + ] + return distribution diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py index 97b0ffa..1ff63ae 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -1,68 +1,63 @@ -from frostfs_testlib import reporter -from frostfs_testlib.load.interfaces.summarized import SummarizedStats +import logging + from frostfs_testlib.load.load_config import LoadParams, LoadScenario from frostfs_testlib.load.load_metrics import get_metrics_object +logger = logging.getLogger("NeoLogger") + class LoadVerifier: def __init__(self, load_params: LoadParams) -> None: self.load_params = load_params - def collect_load_issues(self, load_summaries: dict[str, dict]) -> list[str]: - summarized = SummarizedStats.collect(self.load_params, load_summaries) - issues = [] + def verify_summaries(self, load_summary, verification_summary) -> None: + exceptions = [] - for operation_type, stats in summarized.items(): - if stats.threads and not stats.operations: - issues.append(f"No any {operation_type.lower()} operation was performed") - - if stats.errors.percent > stats.errors.threshold: - rate_str = self._get_rate_str(stats.errors.percent) - issues.append(f"{operation_type} errors exceeded threshold: {rate_str} > {stats.errors.threshold}%") - - return issues - - def collect_verify_issues(self, load_summaries, verification_summaries) -> list[str]: - verify_issues: list[str] = [] - for k6_process_label in load_summaries: - with reporter.step(f"Check verify scenario results for {k6_process_label}"): - verify_issues.extend( - self._collect_verify_issues_on_process( - k6_process_label, - load_summaries[k6_process_label], - verification_summaries[k6_process_label], - ) - ) - return verify_issues - - def _get_rate_str(self, rate: float, minimal: float = 0.01) -> str: - return f"{rate:.2f}%" if rate >= minimal else f"~{minimal}%" - - def _collect_verify_issues_on_process(self, label, load_summary, verification_summary) -> list[str]: - issues = [] + if not verification_summary or not load_summary: + logger.info("Can't check load results due to missing summary") load_metrics = get_metrics_object(self.load_params.scenario, load_summary) writers = self.load_params.writers or self.load_params.preallocated_writers or 0 + readers = self.load_params.readers or self.load_params.preallocated_readers or 0 deleters = self.load_params.deleters or self.load_params.preallocated_deleters or 0 - delete_success = 0 + objects_count = load_metrics.write_success_iterations + fails_count = load_metrics.write_failed_iterations + + if writers > 0: + if objects_count < 1: + exceptions.append("Total put objects should be greater than 0") + if fails_count > 0: + exceptions.append(f"There were {fails_count} failed write operations") + + if readers > 0: + read_count = load_metrics.read_success_iterations + read_fails_count = load_metrics.read_failed_iterations + if read_count < 1: + exceptions.append("Total read operations should be greater than 0") + if read_fails_count > 0: + exceptions.append(f"There were {read_fails_count} failed read operations") if 
deleters > 0: - delete_success = load_metrics.delete.success_iterations + delete_count = load_metrics.delete_success_iterations + delete_fails_count = load_metrics.delete_failed_iterations + if delete_count < 1: + exceptions.append("Total delete operations should be greater than 0") + if delete_fails_count > 0: + exceptions.append(f"There were {delete_fails_count} failed delete operations") if verification_summary: verify_metrics = get_metrics_object(LoadScenario.VERIFY, verification_summary) - verified_objects = verify_metrics.read.success_iterations - invalid_objects = verify_metrics.read.failed_iterations - total_left_objects = load_metrics.write.success_iterations - delete_success + verified_objects = verify_metrics.read_success_iterations + invalid_objects = verify_metrics.read_failed_iterations if invalid_objects > 0: - issues.append(f"There were {invalid_objects} verification fails (hash mismatch).") + exceptions.append(f"There were {invalid_objects} verification fails") # Due to interruptions we may see total verified objects to be less than written on writers count - if abs(total_left_objects - verified_objects) > writers: - issues.append( - f"Verified objects mismatch for {label}. Total: {total_left_objects}, Verified: {verified_objects}. Writers: {writers}." + if abs(objects_count - verified_objects) > writers: + exceptions.append( + f"Verified objects mismatch. Total: {objects_count}, Verified: {verified_objects}. Writers: {writers}." ) - return issues + assert not exceptions, "\n".join(exceptions) diff --git a/src/frostfs_testlib/load/loaders.py b/src/frostfs_testlib/load/loaders.py deleted file mode 100644 index 1e0e97f..0000000 --- a/src/frostfs_testlib/load/loaders.py +++ /dev/null @@ -1,60 +0,0 @@ -from frostfs_testlib.load.interfaces.loader import Loader -from frostfs_testlib.resources.load_params import ( - LOAD_NODE_SSH_PASSWORD, - LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, - LOAD_NODE_SSH_PRIVATE_KEY_PATH, - LOAD_NODE_SSH_USER, -) -from frostfs_testlib.shell.interfaces import Shell, SshCredentials -from frostfs_testlib.shell.ssh_shell import SSHShell -from frostfs_testlib.storage.cluster import ClusterNode - - -class RemoteLoader(Loader): - def __init__(self, ssh_credentials: SshCredentials, ip: str) -> None: - self.ssh_credentials = ssh_credentials - self._ip = ip - - @property - def ip(self): - return self._ip - - def get_shell(self) -> Shell: - ssh_client = SSHShell( - host=self.ip, - login=self.ssh_credentials.ssh_login, - password=self.ssh_credentials.ssh_password, - private_key_path=self.ssh_credentials.ssh_key_path, - private_key_passphrase=self.ssh_credentials.ssh_key_passphrase, - ) - - return ssh_client - - @classmethod - def from_ip_list(cls, ip_list: list[str]) -> list[Loader]: - loaders: list[Loader] = [] - ssh_credentials = SshCredentials( - LOAD_NODE_SSH_USER, - LOAD_NODE_SSH_PASSWORD, - LOAD_NODE_SSH_PRIVATE_KEY_PATH, - LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, - ) - - for ip in ip_list: - loaders.append(RemoteLoader(ssh_credentials, ip)) - - return loaders - - -class NodeLoader(Loader): - """When ClusterNode is the loader for itself (for Local scenario only).""" - - def __init__(self, cluster_node: ClusterNode) -> None: - self.cluster_node = cluster_node - - def get_shell(self) -> Shell: - return self.cluster_node.host.get_shell() - - @property - def ip(self): - return self.cluster_node.host_ip diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py deleted file mode 100644 index 1ceac09..0000000 --- 
a/src/frostfs_testlib/load/runners.py +++ /dev/null @@ -1,466 +0,0 @@ -import copy -import itertools -import math -import time -from dataclasses import fields -from threading import Event -from typing import Optional -from urllib.parse import urlparse - -from frostfs_testlib import reporter -from frostfs_testlib.credentials.interfaces import S3Credentials, User -from frostfs_testlib.load.interfaces.loader import Loader -from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner -from frostfs_testlib.load.k6 import K6 -from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType -from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader -from frostfs_testlib.resources import optionals -from frostfs_testlib.resources.common import STORAGE_USER_NAME -from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR, LOAD_NODE_SSH_USER, LOAD_NODES -from frostfs_testlib.shell.command_inspectors import SuInspector -from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController -from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode -from frostfs_testlib.testing import parallel, run_optionally -from frostfs_testlib.testing.test_control import retry -from frostfs_testlib.utils import datetime_utils -from frostfs_testlib.utils.file_keeper import FileKeeper - - -class RunnerBase(ScenarioRunner): - k6_instances: list[K6] - loaders: list[Loader] - - @reporter.step("Run preset on loaders") - def preset(self): - parallel([k6.preset for k6 in self.k6_instances]) - - @reporter.step("Wait until load finish") - def wait_until_finish(self, soft_timeout: int = 0): - event = Event() - parallel([k6.wait_until_finished for k6 in self.k6_instances], event=event, soft_timeout=soft_timeout) - - @property - def is_running(self): - futures = parallel([k6.is_running for k6 in self.k6_instances]) - - return any([future.result() for future in futures]) - - def get_k6_instances(self): - return self.k6_instances - - def get_loaders(self) -> list[Loader]: - return self.loaders - - -class DefaultRunner(RunnerBase): - user: User - - def __init__( - self, - user: User, - load_ip_list: Optional[list[str]] = None, - ) -> None: - if load_ip_list is None: - load_ip_list = LOAD_NODES - self.loaders = RemoteLoader.from_ip_list(load_ip_list) - self.user = user - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Preparation steps") - def prepare( - self, - load_params: LoadParams, - cluster_nodes: list[ClusterNode], - nodes_under_load: list[ClusterNode], - k6_dir: str, - ): - if load_params.force_fresh_registry and load_params.custom_registry: - with reporter.step("Forcing fresh registry files"): - parallel(self._force_fresh_registry, self.loaders, load_params) - - if load_params.load_type != LoadType.S3: - return - - with reporter.step("Init s3 client on loaders"): - s3_credentials = self.user.s3_credentials - parallel(self._aws_configure_on_loader, self.loaders, s3_credentials) - - def _force_fresh_registry(self, loader: Loader, load_params: LoadParams): - with reporter.step(f"Forcing fresh registry on {loader.ip}"): - shell = loader.get_shell() - shell.exec(f"rm -f {load_params.registry_file}") - - def _aws_configure_on_loader( - self, - loader: Loader, - s3_credentials: S3Credentials, - ): - with reporter.step(f"Aws configure on 
{loader.ip}"): - configure_input = [ - InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=s3_credentials.access_key), - InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=s3_credentials.secret_key), - InteractiveInput(prompt_pattern=r".*", input=""), - InteractiveInput(prompt_pattern=r".*", input=""), - ] - loader.get_shell().exec("aws configure", CommandOptions(interactive_inputs=configure_input)) - - @reporter.step("Init k6 instances") - def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): - self.k6_instances = [] - cycled_loaders = itertools.cycle(self.loaders) - - k6_distribution_count = { - K6ProcessAllocationStrategy.PER_LOAD_NODE: len(self.loaders), - K6ProcessAllocationStrategy.PER_ENDPOINT: len(endpoints), - } - endpoints_generators = { - K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]), - K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle([[endpoint] for endpoint in endpoints]), - } - k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy] - endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy] - - distributed_load_params_list = self._get_distributed_load_params_list(load_params, k6_processes_count) - - futures = parallel( - self._init_k6_instance, - distributed_load_params_list, - loader=cycled_loaders, - endpoints=endpoints_gen, - k6_dir=k6_dir, - ) - self.k6_instances = [future.result() for future in futures] - - def _init_k6_instance(self, load_params_for_loader: LoadParams, loader: Loader, endpoints: list[str], k6_dir: str): - shell = loader.get_shell() - with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"): - with reporter.step(f"Make working directory"): - shell.exec(f"sudo mkdir -p {load_params_for_loader.working_dir}") - shell.exec(f"sudo chown {LOAD_NODE_SSH_USER} {load_params_for_loader.working_dir}") - - return K6( - load_params_for_loader, - endpoints, - k6_dir, - shell, - loader, - self.user, - ) - - def _get_distributed_load_params_list(self, original_load_params: LoadParams, workers_count: int) -> list[LoadParams]: - divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR) - distributed_load_params: list[LoadParams] = [] - - for i in range(workers_count): - load_params = copy.deepcopy(original_load_params) - # Append #i here in case if multiple k6 processes goes into same load node - load_params.set_id(f"{load_params.load_id}_{i}") - distributed_load_params.append(load_params) - - load_fields = fields(original_load_params) - - for field in load_fields: - if ( - field.metadata - and original_load_params.scenario in field.metadata["applicable_scenarios"] - and field.metadata["distributed"] - and getattr(original_load_params, field.name) is not None - ): - original_value = getattr(original_load_params, field.name) - distribution = self._get_distribution(math.ceil(original_value / divisor), workers_count) - for i in range(workers_count): - setattr(distributed_load_params[i], field.name, distribution[i]) - - return distributed_load_params - - def _get_distribution(self, clients_count: int, workers_count: int) -> list[int]: - """ - This function will distribute evenly as possible X clients to Y workers. - For example if we have 150 readers (clients) and we want to spread it over 4 load nodes (workers) - this will return [38, 38, 37, 37]. - - Args: - clients_count: amount of things needs to be distributed. - workers_count: amount of workers. - - Returns: - list of distribution. 
- """ - if workers_count < 1: - raise Exception("Workers cannot be less then 1") - - # Amount of guaranteed payload on one worker - clients_per_worker = clients_count // workers_count - # Remainder of clients left to be distributed - remainder = clients_count - clients_per_worker * workers_count - - distribution = [clients_per_worker + 1 if i < remainder else clients_per_worker for i in range(workers_count)] - return distribution - - def start(self): - load_params = self.k6_instances[0].load_params - - parallel([k6.start for k6 in self.k6_instances]) - - wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5 - with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"): - time.sleep(wait_after_start_time) - - def stop(self): - for k6_instance in self.k6_instances: - k6_instance.stop() - - def get_results(self) -> dict: - results = {} - for k6_instance in self.k6_instances: - if k6_instance.load_params.k6_process_allocation_strategy is None: - raise RuntimeError("k6_process_allocation_strategy should not be none") - - result = k6_instance.get_results() - endpoint = urlparse(k6_instance.endpoints[0]).netloc or k6_instance.endpoints[0] - keys_map = { - K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.loader.ip, - K6ProcessAllocationStrategy.PER_ENDPOINT: endpoint, - } - key = keys_map[k6_instance.load_params.k6_process_allocation_strategy] - results[key] = result - - return results - - -class LocalRunner(RunnerBase): - cluster_state_controller: ClusterStateController - file_keeper: FileKeeper - user: User - - def __init__( - self, - cluster_state_controller: ClusterStateController, - file_keeper: FileKeeper, - nodes_under_load: list[ClusterNode], - user: User, - ) -> None: - self.cluster_state_controller = cluster_state_controller - self.file_keeper = file_keeper - self.loaders = [NodeLoader(node) for node in nodes_under_load] - self.nodes_under_load = nodes_under_load - self.user = user - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Preparation steps") - def prepare( - self, - load_params: LoadParams, - cluster_nodes: list[ClusterNode], - nodes_under_load: list[ClusterNode], - k6_dir: str, - ): - parallel(self.prepare_node, nodes_under_load, k6_dir, load_params) - - @retry(3, 5, expected_result=True) - def allow_user_to_login_in_system(self, cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() - - result = None - try: - shell.exec(f"sudo chsh -s /bin/bash {STORAGE_USER_NAME}") - self.lock_passwd_on_node(cluster_node) - options = CommandOptions(check=False, extra_inspectors=[SuInspector(STORAGE_USER_NAME)]) - result = shell.exec("whoami", options) - finally: - if not result or result.return_code: - self.restore_passwd_on_node(cluster_node) - return False - - return True - - @reporter.step("Prepare node {cluster_node}") - def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams): - shell = cluster_node.host.get_shell() - - with reporter.step("Allow storage user to login into system"): - self.allow_user_to_login_in_system(cluster_node) - - with reporter.step("Update limits.conf"): - limits_path = "/etc/security/limits.conf" - self.file_keeper.add(cluster_node.storage_node, limits_path) - content = f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n" - shell.exec(f"echo '{content}' | sudo tee {limits_path}") - - with reporter.step("Download K6"): - shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}") - 
shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}") - shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz --strip-components 2 -C {k6_dir}") - shell.exec(f"sudo chmod -R 777 {k6_dir}") - - with reporter.step("chmod 777 wallet related files on loader"): - shell.exec(f"sudo chmod -R 777 {self.user.wallet.config_path}") - shell.exec(f"sudo chmod -R 777 {self.user.wallet.path}") - - @reporter.step("Init k6 instances") - def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): - self.k6_instances = [] - futures = parallel( - self._init_k6_instance, - self.loaders, - load_params, - k6_dir, - ) - self.k6_instances = [future.result() for future in futures] - - def _init_k6_instance(self, loader: Loader, load_params: LoadParams, k6_dir: str): - shell = loader.get_shell() - with reporter.step(f"Init K6 instance on {loader.ip}"): - with reporter.step(f"Make working directory"): - shell.exec(f"sudo mkdir -p {load_params.working_dir}") - # If we chmod /home/ folder we can no longer ssh to the node - # !! IMPORTANT !! - if ( - load_params.working_dir - and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}" - and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/" - ): - shell.exec(f"sudo chmod -R 777 {load_params.working_dir}") - - return K6( - load_params, - ["localhost:8080"], - k6_dir, - shell, - loader, - self.user, - ) - - def start(self): - load_params = self.k6_instances[0].load_params - - self.cluster_state_controller.stop_services_of_type(S3Gate) - self.cluster_state_controller.stop_services_of_type(StorageNode) - - parallel([k6.start for k6 in self.k6_instances]) - - wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5 - with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"): - time.sleep(wait_after_start_time) - - @reporter.step("Restore passwd on {cluster_node}") - def restore_passwd_on_node(self, cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() - shell.exec("sudo chattr -i /etc/passwd") - - @reporter.step("Lock passwd on {cluster_node}") - def lock_passwd_on_node(self, cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() - shell.exec("sudo chattr +i /etc/passwd") - - def stop(self): - for k6_instance in self.k6_instances: - k6_instance.stop() - - self.cluster_state_controller.start_all_stopped_services() - - def get_results(self) -> dict: - results = {} - for k6_instance in self.k6_instances: - result = k6_instance.get_results() - results[k6_instance.loader.ip] = result - - parallel(self.restore_passwd_on_node, self.nodes_under_load) - - return results - - -class S3LocalRunner(LocalRunner): - endpoints: list[str] - k6_dir: str - - @reporter.step("Run preset on loaders") - def preset(self): - LocalRunner.preset(self) - with reporter.step(f"Resolve containers in preset"): - parallel(self._resolve_containers_in_preset, self.k6_instances) - - @reporter.step("Resolve containers in preset") - def _resolve_containers_in_preset(self, k6_instance: K6): - k6_instance.shell.exec( - f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}" - ) - - @reporter.step("Init k6 instances") - def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): - self.k6_instances = [] - futures = parallel( - self._init_k6_instance_, - self.loaders, - load_params, - endpoints, - k6_dir, - ) - 
self.k6_instances = [future.result() for future in futures] - - def _init_k6_instance_(self, loader: Loader, load_params: LoadParams, endpoints: list[str], k6_dir: str): - shell = loader.get_shell() - with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"): - with reporter.step(f"Make working directory"): - shell.exec(f"sudo mkdir -p {load_params.working_dir}") - # If we chmod /home/ folder we can no longer ssh to the node - # !! IMPORTANT !! - if ( - load_params.working_dir - and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}" - and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/" - ): - shell.exec(f"sudo chmod -R 777 {load_params.working_dir}") - - return K6( - load_params, - self.endpoints, - k6_dir, - shell, - loader, - self.user, - ) - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Preparation steps") - def prepare( - self, - load_params: LoadParams, - cluster_nodes: list[ClusterNode], - nodes_under_load: list[ClusterNode], - k6_dir: str, - ): - self.k6_dir = k6_dir - parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, cluster_nodes) - - @reporter.step("Prepare node {cluster_node}") - def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, cluster_nodes: list[ClusterNode]): - LocalRunner.prepare_node(self, cluster_node, k6_dir, load_params) - self.endpoints = cluster_node.s3_gate.get_all_endpoints() - shell = cluster_node.host.get_shell() - - with reporter.step("Uninstall previous installation of aws cli"): - shell.exec(f"sudo rm -rf /usr/local/aws-cli") - shell.exec(f"sudo rm -rf /usr/local/bin/aws") - shell.exec(f"sudo rm -rf /usr/local/bin/aws_completer") - - with reporter.step("Install aws cli"): - shell.exec(f"sudo curl {load_params.awscli_url} -o {k6_dir}/awscliv2.zip") - shell.exec(f"sudo unzip -q {k6_dir}/awscliv2.zip -d {k6_dir}") - shell.exec(f"sudo {k6_dir}/aws/install") - - with reporter.step("Install requests python module"): - shell.exec(f"sudo apt-get -y install python3-pip") - shell.exec(f"sudo curl -so {k6_dir}/requests.tar.gz {load_params.requests_module_url}") - shell.exec(f"sudo python3 -m pip install -I {k6_dir}/requests.tar.gz") - - with reporter.step(f"Init s3 client on {cluster_node.host_ip}"): - configure_input = [ - InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=self.user.s3_credentials.access_key), - InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=self.user.s3_credentials.secret_key), - InteractiveInput(prompt_pattern=r".*", input=""), - InteractiveInput(prompt_pattern=r".*", input=""), - ] - shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input)) diff --git a/src/frostfs_testlib/plugins/__init__.py b/src/frostfs_testlib/plugins/__init__.py index 26b2441..fcd7acc 100644 --- a/src/frostfs_testlib/plugins/__init__.py +++ b/src/frostfs_testlib/plugins/__init__.py @@ -1,6 +1,12 @@ -from importlib.metadata import entry_points +import sys from typing import Any +if sys.version_info < (3, 10): + # On Python prior 3.10 we need to use backport of entry points + from importlib_metadata import entry_points +else: + from importlib.metadata import entry_points + def load_plugin(plugin_group: str, name: str) -> Any: """Loads plugin using entry point specification. @@ -17,16 +23,3 @@ def load_plugin(plugin_group: str, name: str) -> Any: return None plugin = plugins[name] return plugin.load() - - -def load_all(group: str) -> Any: - """Loads all plugins using entry point specification. 
- - Args: - group: Name of plugin group. - - Returns: - Classes from specified group. - """ - plugins = entry_points(group=group) - return [plugin.load() for plugin in plugins] diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py index 071675a..7f49000 100644 --- a/src/frostfs_testlib/processes/remote_process.py +++ b/src/frostfs_testlib/processes/remote_process.py @@ -8,40 +8,28 @@ from tenacity import retry from tenacity.stop import stop_after_attempt from tenacity.wait import wait_fixed -from frostfs_testlib import reporter +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import Shell -from frostfs_testlib.shell.command_inspectors import SuInspector -from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions +from frostfs_testlib.shell.interfaces import CommandOptions + +reporter = get_reporter() class RemoteProcess: - def __init__( - self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector], proc_id: str - ): + def __init__(self, cmd: str, process_dir: str, shell: Shell): self.process_dir = process_dir self.cmd = cmd self.stdout_last_line_number = 0 self.stderr_last_line_number = 0 self.pid: Optional[str] = None self.proc_rc: Optional[int] = None - self.proc_start_time: Optional[int] = None - self.proc_end_time: Optional[int] = None self.saved_stdout: Optional[str] = None self.saved_stderr: Optional[str] = None self.shell = shell - self.proc_id: str = proc_id - self.cmd_inspectors: list[CommandInspector] = [cmd_inspector] if cmd_inspector else [] @classmethod - @reporter.step("Create remote process") - def create( - cls, - command: str, - shell: Shell, - working_dir: str = "/tmp", - user: Optional[str] = None, - proc_id: Optional[str] = None, - ) -> RemoteProcess: + @reporter.step_deco("Create remote process") + def create(cls, command: str, shell: Shell, working_dir: str = "/tmp") -> RemoteProcess: """ Create a process on a remote host. @@ -51,8 +39,6 @@ class RemoteProcess: rc: contains script return code stderr: contains script errors stdout: contains script output - user: user on behalf whom command will be executed - proc_id: process string identificator Args: shell: Shell instance @@ -62,32 +48,16 @@ class RemoteProcess: Returns: RemoteProcess instance for further examination """ - if proc_id is None: - proc_id = f"{uuid.uuid4()}" - - cmd_inspector = SuInspector(user) if user else None remote_process = cls( - cmd=command, - process_dir=os.path.join(working_dir, f"proc_{proc_id}"), - shell=shell, - cmd_inspector=cmd_inspector, - proc_id=proc_id, + cmd=command, process_dir=os.path.join(working_dir, f"proc_{uuid.uuid4()}"), shell=shell ) - + remote_process._create_process_dir() + remote_process._generate_command_script(command) + remote_process._start_process() + remote_process.pid = remote_process._get_pid() return remote_process - @reporter.step("Start remote process") - def start(self): - """ - Starts a process on a remote host. - """ - - self._create_process_dir() - self._generate_command_script() - self._start_process() - self.pid = self._get_pid() - - @reporter.step("Get process stdout") + @reporter.step_deco("Get process stdout") def stdout(self, full: bool = False) -> str: """ Method to get process stdout, either fresh info or full. 
@@ -103,8 +73,7 @@ class RemoteProcess: cur_stdout = self.saved_stdout else: terminal = self.shell.exec( - f"cat {self.process_dir}/stdout", - options=CommandOptions(no_log=True, extra_inspectors=self.cmd_inspectors), + f"cat {self.process_dir}/stdout", options=CommandOptions(no_log=True) ) if self.proc_rc is not None: self.saved_stdout = terminal.stdout @@ -119,7 +88,7 @@ class RemoteProcess: return resulted_stdout return "" - @reporter.step("Get process stderr") + @reporter.step_deco("Get process stderr") def stderr(self, full: bool = False) -> str: """ Method to get process stderr, either fresh info or full. @@ -135,8 +104,7 @@ class RemoteProcess: cur_stderr = self.saved_stderr else: terminal = self.shell.exec( - f"cat {self.process_dir}/stderr", - options=CommandOptions(no_log=True, extra_inspectors=self.cmd_inspectors), + f"cat {self.process_dir}/stderr", options=CommandOptions(no_log=True) ) if self.proc_rc is not None: self.saved_stderr = terminal.stdout @@ -150,131 +118,84 @@ class RemoteProcess: return resulted_stderr return "" - @reporter.step("Get process rc") + @reporter.step_deco("Get process rc") def rc(self) -> Optional[int]: if self.proc_rc is not None: return self.proc_rc - result = self._cat_proc_file("rc") - if not result: - return None - - self.proc_rc = int(result) - return self.proc_rc - - @reporter.step("Get process start time") - def start_time(self) -> Optional[int]: - if self.proc_start_time is not None: - return self.proc_start_time - - result = self._cat_proc_file("start_time") - if not result: - return None - - self.proc_start_time = int(result) - return self.proc_start_time - - @reporter.step("Get process end time") - def end_time(self) -> Optional[int]: - if self.proc_end_time is not None: - return self.proc_end_time - - result = self._cat_proc_file("end_time") - if not result: - return None - - self.proc_end_time = int(result) - return self.proc_end_time - - def _cat_proc_file(self, file: str) -> Optional[str]: - terminal = self.shell.exec( - f"cat {self.process_dir}/{file}", - CommandOptions(check=False, extra_inspectors=self.cmd_inspectors, no_log=True), - ) + terminal = self.shell.exec(f"cat {self.process_dir}/rc", CommandOptions(check=False)) if "No such file or directory" in terminal.stderr: return None - elif terminal.return_code != 0: - raise AssertionError(f"cat process {file} was not successful: {terminal.stderr}") + elif terminal.stderr or terminal.return_code != 0: + raise AssertionError(f"cat process rc was not successful: {terminal.stderr}") - return terminal.stdout + self.proc_rc = int(terminal.stdout) + return self.proc_rc - @reporter.step("Check if process is running") + @reporter.step_deco("Check if process is running") def running(self) -> bool: return self.rc() is None - @reporter.step("Send signal to process") + @reporter.step_deco("Send signal to process") def send_signal(self, signal: int) -> None: - kill_res = self.shell.exec( - f"kill -{signal} {self.pid}", - CommandOptions(check=False, extra_inspectors=self.cmd_inspectors), - ) + kill_res = self.shell.exec(f"kill -{signal} {self.pid}", CommandOptions(check=False)) if "No such process" in kill_res.stderr: return if kill_res.return_code: - raise AssertionError(f"Signal {signal} not sent. Return code of kill: {kill_res.return_code}") + raise AssertionError( + f"Signal {signal} not sent. 
Return code of kill: {kill_res.return_code}" + ) - @reporter.step("Stop process") + @reporter.step_deco("Stop process") def stop(self) -> None: self.send_signal(15) - @reporter.step("Kill process") + @reporter.step_deco("Kill process") def kill(self) -> None: self.send_signal(9) - @reporter.step("Clear process directory") + @reporter.step_deco("Clear process directory") def clear(self) -> None: if self.process_dir == "/": raise AssertionError(f"Invalid path to delete: {self.process_dir}") - self.shell.exec(f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) + self.shell.exec(f"rm -rf {self.process_dir}") - @reporter.step("Start remote process") + @reporter.step_deco("Start remote process") def _start_process(self) -> None: self.shell.exec( f"nohup {self.process_dir}/command.sh </dev/null >{self.process_dir}/stdout " - f"2>{self.process_dir}/stderr &", - CommandOptions(extra_inspectors=self.cmd_inspectors), + f"2>{self.process_dir}/stderr &" ) - @reporter.step("Create process directory") + @reporter.step_deco("Create process directory") def _create_process_dir(self) -> None: - self.shell.exec(f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) - self.shell.exec(f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) - terminal = self.shell.exec(f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) + self.shell.exec(f"mkdir {self.process_dir}") + self.shell.exec(f"chmod 777 {self.process_dir}") + terminal = self.shell.exec(f"realpath {self.process_dir}") self.process_dir = terminal.stdout.strip() - @reporter.step("Get pid") + @reporter.step_deco("Get pid") @retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True) def _get_pid(self) -> str: - terminal = self.shell.exec(f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors)) + terminal = self.shell.exec(f"cat {self.process_dir}/pid") assert terminal.stdout, f"invalid pid: {terminal.stdout}" return terminal.stdout.strip() - @reporter.step("Generate command script") - def _generate_command_script(self) -> None: - command = self.cmd.replace('"', '\\"').replace("\\", "\\\\") + @reporter.step_deco("Generate command script") + def _generate_command_script(self, command: str) -> None: + command = command.replace('"', '\\"').replace("\\", "\\\\") script = ( f"#!/bin/bash\n" f"cd {self.process_dir}\n" - f"date +%s > {self.process_dir}/start_time\n" f"{command} &\n" f"pid=\$!\n" f"cd {self.process_dir}\n" f"echo \$pid > {self.process_dir}/pid\n" f"wait \$pid\n" - f"echo $? > {self.process_dir}/rc\n" - f"date +%s > {self.process_dir}/end_time\n" + f"echo $? 
> {self.process_dir}/rc" ) - self.shell.exec( - f'echo "{script}" > {self.process_dir}/command.sh', - CommandOptions(extra_inspectors=self.cmd_inspectors), - ) - self.shell.exec( - f"cat {self.process_dir}/command.sh", - CommandOptions(extra_inspectors=self.cmd_inspectors), - ) - self.shell.exec( - f"chmod +x {self.process_dir}/command.sh", - CommandOptions(extra_inspectors=self.cmd_inspectors), - ) + self.shell.exec(f'echo "{script}" > {self.process_dir}/command.sh') + self.shell.exec(f"cat {self.process_dir}/command.sh") + self.shell.exec(f"chmod +x {self.process_dir}/command.sh") diff --git a/src/frostfs_testlib/reporter/__init__.py b/src/frostfs_testlib/reporter/__init__.py index 848c175..10e4146 100644 --- a/src/frostfs_testlib/reporter/__init__.py +++ b/src/frostfs_testlib/reporter/__init__.py @@ -1,9 +1,6 @@ -from typing import Any - from frostfs_testlib.reporter.allure_handler import AllureHandler from frostfs_testlib.reporter.interfaces import ReporterHandler from frostfs_testlib.reporter.reporter import Reporter -from frostfs_testlib.reporter.steps_logger import StepsLogger __reporter = Reporter() @@ -18,11 +15,3 @@ def get_reporter() -> Reporter: Singleton reporter instance. """ return __reporter - - -def step(title: str): - return __reporter.step(title) - - -def attach(content: Any, file_name: str): - return __reporter.attach(content, file_name) diff --git a/src/frostfs_testlib/reporter/allure_handler.py b/src/frostfs_testlib/reporter/allure_handler.py index ef63638..8e00b26 100644 --- a/src/frostfs_testlib/reporter/allure_handler.py +++ b/src/frostfs_testlib/reporter/allure_handler.py @@ -1,5 +1,5 @@ import os -from contextlib import AbstractContextManager, ContextDecorator +from contextlib import AbstractContextManager from textwrap import shorten from typing import Any, Callable @@ -12,8 +12,8 @@ from frostfs_testlib.reporter.interfaces import ReporterHandler class AllureHandler(ReporterHandler): """Handler that stores test artifacts in Allure report.""" - def step(self, name: str) -> AbstractContextManager | ContextDecorator: - name = shorten(name, width=140, placeholder="...") + def step(self, name: str) -> AbstractContextManager: + name = shorten(name, width=70, placeholder="...") return allure.step(name) def step_decorator(self, name: str) -> Callable: @@ -21,14 +21,9 @@ class AllureHandler(ReporterHandler): def attach(self, body: Any, file_name: str) -> None: attachment_name, extension = os.path.splitext(file_name) - if extension.startswith("."): - extension = extension[1:] attachment_type = self._resolve_attachment_type(extension) - if os.path.exists(body): - allure.attach.file(body, file_name, attachment_type, extension) - else: - allure.attach(body, attachment_name, attachment_type, extension) + allure.attach(body, attachment_name, attachment_type, extension) def _resolve_attachment_type(self, extension: str) -> attachment_type: """Try to find matching Allure attachment type by extension. 
diff --git a/src/frostfs_testlib/reporter/interfaces.py b/src/frostfs_testlib/reporter/interfaces.py index 4e24feb..b47a3fb 100644 --- a/src/frostfs_testlib/reporter/interfaces.py +++ b/src/frostfs_testlib/reporter/interfaces.py @@ -1,5 +1,5 @@ from abc import ABC, abstractmethod -from contextlib import AbstractContextManager, ContextDecorator +from contextlib import AbstractContextManager from typing import Any, Callable @@ -7,7 +7,7 @@ class ReporterHandler(ABC): """Interface of handler that stores test artifacts in some reporting tool.""" @abstractmethod - def step(self, name: str) -> AbstractContextManager | ContextDecorator: + def step(self, name: str) -> AbstractContextManager: """Register a new step in test execution. Args: diff --git a/src/frostfs_testlib/reporter/reporter.py b/src/frostfs_testlib/reporter/reporter.py index 2d1a43e..d1c75f5 100644 --- a/src/frostfs_testlib/reporter/reporter.py +++ b/src/frostfs_testlib/reporter/reporter.py @@ -5,7 +5,6 @@ from typing import Any, Callable, Optional from frostfs_testlib.plugins import load_plugin from frostfs_testlib.reporter.interfaces import ReporterHandler -from frostfs_testlib.utils.func_utils import format_by_args @contextmanager @@ -64,8 +63,7 @@ class Reporter: def wrapper(*a, **kw): resulting_func = func for handler in self.handlers: - parsed_name = format_by_args(func, name, *a, **kw) - decorator = handler.step_decorator(parsed_name) + decorator = handler.step_decorator(name) resulting_func = decorator(resulting_func) return resulting_func(*a, **kw) @@ -83,11 +81,11 @@ class Reporter: Returns: Step context. """ + if not self.handlers: + return _empty_step() + step_contexts = [handler.step(name) for handler in self.handlers] - if not step_contexts: - step_contexts = [_empty_step()] - decorated_wrapper = self.step_deco(name) - return AggregateContextManager(step_contexts, decorated_wrapper) + return AggregateContextManager(step_contexts) def attach(self, content: Any, file_name: str) -> None: """Attach specified content with given file name to the test report. 
@@ -106,10 +104,9 @@ class AggregateContextManager(AbstractContextManager): contexts: list[AbstractContextManager] - def __init__(self, contexts: list[AbstractContextManager], decorated_wrapper: Callable) -> None: + def __init__(self, contexts: list[AbstractContextManager]) -> None: super().__init__() self.contexts = contexts - self.wrapper = decorated_wrapper def __enter__(self): for context in self.contexts: @@ -130,6 +127,3 @@ class AggregateContextManager(AbstractContextManager): # If all context agreed to suppress exception, then suppress it; # otherwise return None to reraise return True if all(suppress_decisions) else None - - def __call__(self, *args: Any, **kwds: Any) -> Any: - return self.wrapper(*args, **kwds) diff --git a/src/frostfs_testlib/reporter/steps_logger.py b/src/frostfs_testlib/reporter/steps_logger.py deleted file mode 100644 index 4cdfb3d..0000000 --- a/src/frostfs_testlib/reporter/steps_logger.py +++ /dev/null @@ -1,56 +0,0 @@ -import logging -import threading -from contextlib import AbstractContextManager, ContextDecorator -from functools import wraps -from types import TracebackType -from typing import Any, Callable - -from frostfs_testlib.reporter.interfaces import ReporterHandler - - -class StepsLogger(ReporterHandler): - """Handler that prints steps to log.""" - - def step(self, name: str) -> AbstractContextManager | ContextDecorator: - return StepLoggerContext(name) - - def step_decorator(self, name: str) -> Callable: - return StepLoggerContext(name) - - def attach(self, body: Any, file_name: str) -> None: - pass - - -class StepLoggerContext(AbstractContextManager): - INDENT = {} - - def __init__(self, title: str): - self.title = title - self.logger = logging.getLogger("NeoLogger") - self.thread = threading.get_ident() - if self.thread not in StepLoggerContext.INDENT: - StepLoggerContext.INDENT[self.thread] = 1 - - def __enter__(self) -> Any: - indent = ">" * StepLoggerContext.INDENT[self.thread] - self.logger.info(f"[{self.thread}] {indent} {self.title}") - StepLoggerContext.INDENT[self.thread] += 1 - - def __exit__( - self, - __exc_type: type[BaseException] | None, - __exc_value: BaseException | None, - __traceback: TracebackType | None, - ) -> bool | None: - - StepLoggerContext.INDENT[self.thread] -= 1 - indent = "<" * StepLoggerContext.INDENT[self.thread] - self.logger.info(f"[{self.thread}] {indent} {self.title}") - - def __call__(self, func): - @wraps(func) - def impl(*a, **kw): - with self: - return func(*a, **kw) - - return impl diff --git a/src/frostfs_testlib/resources/cli.py b/src/frostfs_testlib/resources/cli.py index 06a9832..5f7d468 100644 --- a/src/frostfs_testlib/resources/cli.py +++ b/src/frostfs_testlib/resources/cli.py @@ -9,4 +9,4 @@ FROSTFS_ADM_EXEC = os.getenv("FROSTFS_ADM_EXEC", "frostfs-adm") # Config for frostfs-adm utility. 
Optional if tests are running against devenv FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH") -CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", "100s") +CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", None) diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index 53bcfaa..dfbb3a1 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -10,8 +10,6 @@ COMPLEX_OBJECT_TAIL_SIZE = os.getenv("COMPLEX_OBJECT_TAIL_SIZE", "1000") SERVICE_MAX_STARTUP_TIME = os.getenv("SERVICE_MAX_STARTUP_TIME", "5m") -STORAGE_USER_NAME = "frostfs-storage" - MORPH_TIMEOUT = os.getenv("MORPH_BLOCK_TIME", "8s") MORPH_BLOCK_TIME = os.getenv("MORPH_BLOCK_TIME", "8s") FROSTFS_CONTRACT_CACHE_TIMEOUT = os.getenv("FROSTFS_CONTRACT_CACHE_TIMEOUT", "30s") @@ -43,14 +41,6 @@ with open(DEFAULT_WALLET_CONFIG, "w") as file: # Number of attempts that S3 clients will attempt per each request (1 means single attempt # without any retries) -MAX_REQUEST_ATTEMPTS = 5 +MAX_REQUEST_ATTEMPTS = 1 RETRY_MODE = "standard" CREDENTIALS_CREATE_TIMEOUT = "1m" - - -HOSTING_CONFIG_FILE = os.getenv( - "HOSTING_CONFIG_FILE", os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..", ".devenv.hosting.yaml")) -) - -MORE_LOG = os.getenv("MORE_LOG", "1") -EXPIRATION_EPOCH_ATTRIBUTE = "__SYSTEM__EXPIRATION_EPOCH" diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 6c0cb14..e2e4c48 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -1,6 +1,5 @@ # Regex patterns of status codes of Container service CONTAINER_NOT_FOUND = "code = 3072.*message = container not found" -SUBJECT_NOT_FOUND = "code = 1024.*message = frostfs error: chain/client.*subject not found.*" # Regex patterns of status codes of Object service MALFORMED_REQUEST = "code = 1024.*message = malformed request" @@ -10,7 +9,6 @@ OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed" SESSION_NOT_FOUND = "code = 4096.*message = session token not found" OUT_OF_RANGE = "code = 2053.*message = out of range" EXPIRED_SESSION_TOKEN = "code = 4097.*message = expired session token" -ADD_CHAIN_ERROR = "code = 5120 message = apemanager access denied" # TODO: Change to codes with message # OBJECT_IS_LOCKED = "code = 2050.*message = object is locked" # LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." will be available once 2092 is fixed @@ -25,14 +23,6 @@ INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow" INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier" INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier" -S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs" -S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema." - -RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied" -# Errors from node missing reasons if request was forwarded. Commenting for now -# RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied" -RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request" -NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound" -# Errors from node missing reasons if request was forwarded. 
Commenting for now -# NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound" -NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request" +S3_MALFORMED_XML_REQUEST = ( + "The XML you provided was not well-formed or did not validate against our published schema." +) diff --git a/src/frostfs_testlib/resources/load_params.py b/src/frostfs_testlib/resources/load_params.py index ad3ed1c..6699207 100644 --- a/src/frostfs_testlib/resources/load_params.py +++ b/src/frostfs_testlib/resources/load_params.py @@ -11,9 +11,8 @@ BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 0) BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 0) BACKGROUND_DELETERS_COUNT = os.getenv("BACKGROUND_DELETERS_COUNT", 0) BACKGROUND_VERIFIERS_COUNT = os.getenv("BACKGROUND_VERIFIERS_COUNT", 0) -BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 1800) +BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 600) BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE = os.getenv("BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE", 32) -BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME = float(os.getenv("BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME", 0.8)) BACKGROUND_LOAD_SETUP_TIMEOUT = os.getenv("BACKGROUND_LOAD_SETUP_TIMEOUT", "5s") # This will decrease load params for some weak environments @@ -26,9 +25,8 @@ BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY = os.getenv( ) BACKGROUND_LOAD_S3_LOCATION = os.getenv("BACKGROUND_LOAD_S3_LOCATION", "node-off") PRESET_CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "40") -PRESET_CONTAINER_CREATION_RETRY_COUNT = os.getenv("CONTAINER_CREATION_RETRY_COUNT", "20") # TODO: At lease one object is required due to bug in xk6 (buckets with no objects produce millions exceptions in read) -PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "1") +PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "10") K6_DIRECTORY = os.getenv("K6_DIRECTORY", "/etc/k6") K6_TEARDOWN_PERIOD = os.getenv("K6_TEARDOWN_PERIOD", "30") K6_STOP_SIGNAL_TIMEOUT = int(os.getenv("K6_STOP_SIGNAL_TIMEOUT", 300)) diff --git a/src/frostfs_testlib/resources/optionals.py b/src/frostfs_testlib/resources/optionals.py index 6caf158..2a7ff22 100644 --- a/src/frostfs_testlib/resources/optionals.py +++ b/src/frostfs_testlib/resources/optionals.py @@ -16,10 +16,11 @@ OPTIONAL_NODE_UNDER_LOAD = os.getenv("OPTIONAL_NODE_UNDER_LOAD") OPTIONAL_FAILOVER_ENABLED = str_to_bool(os.getenv("OPTIONAL_FAILOVER_ENABLED", "true")) # Set this to True to disable background load. I.E. node which supposed to be stopped will not be actually stopped. -OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true")) +OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool( + os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true") +) # Set this to False for disable autouse fixture like node healthcheck during developing time. 
-OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true")) - -# Use cache for fixtures with @cachec_fixture decorator -OPTIONAL_CACHE_FIXTURES = str_to_bool(os.getenv("OPTIONAL_CACHE_FIXTURES", "false")) +OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool( + os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true") +) diff --git a/src/frostfs_testlib/resources/s3_acl_grants.py b/src/frostfs_testlib/resources/s3_acl_grants.py deleted file mode 100644 index a716bc5..0000000 --- a/src/frostfs_testlib/resources/s3_acl_grants.py +++ /dev/null @@ -1,9 +0,0 @@ -ALL_USERS_GROUP_URI = "http://acs.amazonaws.com/groups/global/AllUsers" -ALL_USERS_GROUP_WRITE_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "WRITE"} -ALL_USERS_GROUP_READ_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "READ"} -CANONICAL_USER_FULL_CONTROL_GRANT = {"Grantee": {"Type": "CanonicalUser"}, "Permission": "FULL_CONTROL"} - -# https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl -PRIVATE_GRANTS = [] -PUBLIC_READ_GRANTS = [ALL_USERS_GROUP_READ_GRANT] -PUBLIC_READ_WRITE_GRANTS = [ALL_USERS_GROUP_WRITE_GRANT, ALL_USERS_GROUP_READ_GRANT] diff --git a/src/frostfs_testlib/s3/__init__.py b/src/frostfs_testlib/s3/__init__.py new file mode 100644 index 0000000..32426c2 --- /dev/null +++ b/src/frostfs_testlib/s3/__init__.py @@ -0,0 +1,3 @@ +from frostfs_testlib.s3.aws_cli_client import AwsCliClient +from frostfs_testlib.s3.boto3_client import Boto3ClientWrapper +from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py new file mode 100644 index 0000000..054a1e8 --- /dev/null +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -0,0 +1,754 @@ +import json +import logging +import os +import uuid +from datetime import datetime +from time import sleep +from typing import Literal, Optional, Union + +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.common import ( + ASSETS_DIR, + MAX_REQUEST_ATTEMPTS, + RETRY_MODE, + S3_SYNC_WAIT_TIME, +) +from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict + +# TODO: Refactor this code to use shell instead of _cmd_run +from frostfs_testlib.utils.cli_utils import _cmd_run, _configure_aws_cli + +reporter = get_reporter() +logger = logging.getLogger("NeoLogger") +LONG_TIMEOUT = 240 + + +class AwsCliClient(S3ClientWrapper): + # Flags that we use for all S3 commands: disable SSL verification (as we use self-signed + # certificate in devenv) and disable automatic pagination in CLI output + common_flags = "--no-verify-ssl --no-paginate" + s3gate_endpoint: str + + @reporter.step_deco("Configure S3 client (aws cli)") + def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: + self.s3gate_endpoint = s3gate_endpoint + try: + _configure_aws_cli("aws configure", access_key_id, secret_access_key) + _cmd_run(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}") + _cmd_run(f"aws configure set retry_mode {RETRY_MODE}") + except Exception as err: + raise RuntimeError("Error while configuring AwsCliClient") from err + + @reporter.step_deco("Create bucket S3") + def create_bucket( + self, + bucket: Optional[str] = None, + object_lock_enabled_for_bucket: Optional[bool] = None, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = 
None, + grant_full_control: Optional[str] = None, + location_constraint: Optional[str] = None, + ) -> str: + if bucket is None: + bucket = str(uuid.uuid4()) + + if object_lock_enabled_for_bucket is None: + object_lock = "" + elif object_lock_enabled_for_bucket: + object_lock = " --object-lock-enabled-for-bucket" + else: + object_lock = " --no-object-lock-enabled-for-bucket" + cmd = ( + f"aws {self.common_flags} s3api create-bucket --bucket {bucket} " + f"{object_lock} --endpoint {self.s3gate_endpoint}" + ) + if acl: + cmd += f" --acl {acl}" + if grant_full_control: + cmd += f" --grant-full-control {grant_full_control}" + if grant_write: + cmd += f" --grant-write {grant_write}" + if grant_read: + cmd += f" --grant-read {grant_read}" + if location_constraint: + cmd += f" --create-bucket-configuration LocationConstraint={location_constraint}" + _cmd_run(cmd) + sleep(S3_SYNC_WAIT_TIME) + + return bucket + + @reporter.step_deco("List buckets S3") + def list_buckets(self) -> list[str]: + cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint}" + output = _cmd_run(cmd) + buckets_json = self._to_json(output) + return [bucket["Name"] for bucket in buckets_json["Buckets"]] + + @reporter.step_deco("Delete bucket S3") + def delete_bucket(self, bucket: str) -> None: + cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}" + _cmd_run(cmd, LONG_TIMEOUT) + sleep(S3_SYNC_WAIT_TIME) + + @reporter.step_deco("Head bucket S3") + def head_bucket(self, bucket: str) -> None: + cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}" + _cmd_run(cmd) + + @reporter.step_deco("Put bucket versioning status") + def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: + cmd = ( + f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} " + f"--versioning-configuration Status={status.value} " + f"--endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Get bucket versioning status") + def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: + cmd = ( + f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("Status") + + @reporter.step_deco("Put bucket tagging") + def put_bucket_tagging(self, bucket: str, tags: list) -> None: + tags_json = { + "TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + } + cmd = ( + f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} " + f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Get bucket tagging") + def get_bucket_tagging(self, bucket: str) -> list: + cmd = ( + f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("TagSet") + + @reporter.step_deco("Get bucket acl") + def get_bucket_acl(self, bucket: str) -> list: + cmd = ( + f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("Grants") + + @reporter.step_deco("Get bucket location") + def get_bucket_location(self, bucket: str) -> dict: + cmd = ( + f"aws {self.common_flags} s3api 
get-bucket-location --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("LocationConstraint") + + @reporter.step_deco("List objects S3") + def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + cmd = ( + f"aws {self.common_flags} s3api list-objects --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + + obj_list = [obj["Key"] for obj in response.get("Contents", [])] + logger.info(f"Found s3 objects: {obj_list}") + + return response if full_output else obj_list + + @reporter.step_deco("List objects S3 v2") + def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + cmd = ( + f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + + obj_list = [obj["Key"] for obj in response.get("Contents", [])] + logger.info(f"Found s3 objects: {obj_list}") + + return response if full_output else obj_list + + @reporter.step_deco("List objects versions S3") + def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: + cmd = ( + f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response if full_output else response.get("Versions", []) + + @reporter.step_deco("List objects delete markers S3") + def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: + cmd = ( + f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response if full_output else response.get("DeleteMarkers", []) + + @reporter.step_deco("Copy object S3") + def copy_object( + self, + source_bucket: str, + source_key: str, + bucket: Optional[str] = None, + key: Optional[str] = None, + acl: Optional[str] = None, + metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None, + metadata: Optional[dict] = None, + tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None, + tagging: Optional[str] = None, + ) -> str: + if bucket is None: + bucket = source_bucket + if key is None: + key = os.path.join(os.getcwd(), str(uuid.uuid4())) + copy_source = f"{source_bucket}/{source_key}" + + cmd = ( + f"aws {self.common_flags} s3api copy-object --copy-source {copy_source} " + f"--bucket {bucket} --key {key} --endpoint {self.s3gate_endpoint}" + ) + if acl: + cmd += f" --acl {acl}" + if metadata_directive: + cmd += f" --metadata-directive {metadata_directive}" + if metadata: + cmd += " --metadata " + for meta_key, value in metadata.items(): + cmd += f" {meta_key}={value}" + if tagging_directive: + cmd += f" --tagging-directive {tagging_directive}" + if tagging: + cmd += f" --tagging {tagging}" + _cmd_run(cmd, LONG_TIMEOUT) + return key + + @reporter.step_deco("Put object S3") + def put_object( + self, + bucket: str, + filepath: str, + key: Optional[str] = None, + metadata: Optional[dict] = None, + tagging: Optional[str] = None, + acl: Optional[str] = None, + object_lock_mode: Optional[str] = None, + object_lock_retain_until_date: Optional[datetime] = None, + object_lock_legal_hold_status: Optional[str] = None, + grant_full_control: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> str: + 
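For orientation, a minimal usage sketch of the CLI-backed wrapper defined above; the endpoint and credential values are placeholders rather than values taken from this change, and the aws CLI itself must be installed and on PATH:

    # Illustrative only: placeholder credentials/endpoint, real aws CLI required.
    from frostfs_testlib.s3 import AwsCliClient

    client = AwsCliClient(
        access_key_id="<ACCESS_KEY_ID>",
        secret_access_key="<SECRET_ACCESS_KEY>",
        s3gate_endpoint="https://s3.frostfs.devenv:8080",
    )
    bucket = client.create_bucket(object_lock_enabled_for_bucket=False)
    version_id = client.put_object(bucket, "/tmp/example.bin")  # VersionId, may be None on an unversioned bucket
    print(client.list_objects(bucket))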
if key is None: + key = os.path.basename(filepath) + + cmd = ( + f"aws {self.common_flags} s3api put-object --bucket {bucket} --key {key} " + f"--body {filepath} --endpoint {self.s3gate_endpoint}" + ) + if metadata: + cmd += " --metadata" + for key, value in metadata.items(): + cmd += f" {key}={value}" + if tagging: + cmd += f" --tagging '{tagging}'" + if acl: + cmd += f" --acl {acl}" + if object_lock_mode: + cmd += f" --object-lock-mode {object_lock_mode}" + if object_lock_retain_until_date: + cmd += f' --object-lock-retain-until-date "{object_lock_retain_until_date}"' + if object_lock_legal_hold_status: + cmd += f" --object-lock-legal-hold-status {object_lock_legal_hold_status}" + if grant_full_control: + cmd += f" --grant-full-control '{grant_full_control}'" + if grant_read: + cmd += f" --grant-read {grant_read}" + output = _cmd_run(cmd, LONG_TIMEOUT) + response = self._to_json(output) + return response.get("VersionId") + + @reporter.step_deco("Head object S3") + def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} " + f"{version} --endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response + + @reporter.step_deco("Get object S3") + def get_object( + self, + bucket: str, + key: str, + version_id: Optional[str] = None, + object_range: Optional[tuple[int, int]] = None, + full_output: bool = False, + ) -> Union[dict, str]: + file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} " + f"{version} {file_path} --endpoint {self.s3gate_endpoint}" + ) + if object_range: + cmd += f" --range bytes={object_range[0]}-{object_range[1]}" + output = _cmd_run(cmd) + response = self._to_json(output) + return response if full_output else file_path + + @reporter.step_deco("Get object ACL") + def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} " + f"{version} --endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("Grants") + + @reporter.step_deco("Put object ACL") + def put_object_acl( + self, + bucket: str, + key: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> list: + cmd = ( + f"aws {self.common_flags} s3api put-object-acl --bucket {bucket} --key {key} " + f" --endpoint {self.s3gate_endpoint}" + ) + if acl: + cmd += f" --acl {acl}" + if grant_write: + cmd += f" --grant-write {grant_write}" + if grant_read: + cmd += f" --grant-read {grant_read}" + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("Grants") + + @reporter.step_deco("Put bucket ACL") + def put_bucket_acl( + self, + bucket: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> None: + cmd = ( + f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " + f" --endpoint {self.s3gate_endpoint}" + ) + if acl: + cmd += f" --acl {acl}" + if grant_write: + cmd += f" --grant-write {grant_write}" + if grant_read: + cmd += f" --grant-read 
{grant_read}" + _cmd_run(cmd) + + @reporter.step_deco("Delete objects S3") + def delete_objects(self, bucket: str, keys: list[str]) -> dict: + file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") + delete_structure = json.dumps(_make_objs_dict(keys)) + with open(file_path, "w") as out_file: + out_file.write(delete_structure) + logger.info(f"Input file for delete-objects: {delete_structure}") + + cmd = ( + f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " + f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd, LONG_TIMEOUT) + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME) + return response + + @reporter.step_deco("Delete object S3") + def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api delete-object --bucket {bucket} " + f"--key {key} {version} --endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd, LONG_TIMEOUT) + sleep(S3_SYNC_WAIT_TIME) + return self._to_json(output) + + @reporter.step_deco("Delete object versions S3") + def delete_object_versions(self, bucket: str, object_versions: list) -> dict: + # Build deletion list in S3 format + delete_list = { + "Objects": [ + { + "Key": object_version["Key"], + "VersionId": object_version["VersionId"], + } + for object_version in object_versions + ] + } + + file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") + delete_structure = json.dumps(delete_list) + with open(file_path, "w") as out_file: + out_file.write(delete_structure) + logger.info(f"Input file for delete-objects: {delete_structure}") + + cmd = ( + f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " + f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd, LONG_TIMEOUT) + sleep(S3_SYNC_WAIT_TIME) + return self._to_json(output) + + @reporter.step_deco("Delete object versions S3 without delete markers") + def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: + # Delete objects without creating delete markers + for object_version in object_versions: + self.delete_object( + bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"] + ) + + @reporter.step_deco("Get object attributes") + def get_object_attributes( + self, + bucket: str, + key: str, + attributes: list[str], + version_id: str = "", + max_parts: int = 0, + part_number: int = 0, + full_output: bool = True, + ) -> dict: + + attrs = ",".join(attributes) + version = f" --version-id {version_id}" if version_id else "" + parts = f"--max-parts {max_parts}" if max_parts else "" + part_number_str = f"--part-number-marker {part_number}" if part_number else "" + cmd = ( + f"aws {self.common_flags} s3api get-object-attributes --bucket {bucket} " + f"--key {key} {version} {parts} {part_number_str} --object-attributes {attrs} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + + for attr in attributes: + assert attr in response, f"Expected attribute {attr} in {response}" + + if full_output: + return response + else: + return response.get(attributes[0]) + + @reporter.step_deco("Get bucket policy") + def get_bucket_policy(self, bucket: str) -> dict: + cmd = ( + f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + 
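The delete_objects wrapper above does not pass keys on the command line; it serializes them into delete.json and hands the file to the CLI via file://. Roughly, the manifest it writes (assuming _make_objs_dict produces the standard delete-objects request shape) looks like this:

    import json

    keys = ["obj-1", "obj-2"]
    manifest = {"Objects": [{"Key": key} for key in keys]}
    # Written to <ASSETS_DIR>/delete.json and then passed as:
    #   aws s3api delete-objects --bucket <bucket> --delete file://delete.json
    print(json.dumps(manifest))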
return response.get("Policy") + + @reporter.step_deco("Put bucket policy") + def put_bucket_policy(self, bucket: str, policy: dict) -> None: + # Leaving it as is was in test repo. Double dumps to escape resulting string + # Example: + # policy = {"a": 1} + # json.dumps(policy) => {"a": 1} + # json.dumps(json.dumps(policy)) => "{\"a\": 1}" + # TODO: update this + dumped_policy = json.dumps(json.dumps(policy)) + cmd = ( + f"aws {self.common_flags} s3api put-bucket-policy --bucket {bucket} " + f"--policy {dumped_policy} --endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Get bucket cors") + def get_bucket_cors(self, bucket: str) -> dict: + cmd = ( + f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("CORSRules") + + @reporter.step_deco("Put bucket cors") + def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: + cmd = ( + f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " + f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Delete bucket cors") + def delete_bucket_cors(self, bucket: str) -> None: + cmd = ( + f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Delete bucket tagging") + def delete_bucket_tagging(self, bucket: str) -> None: + cmd = ( + f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Put object retention") + def put_object_retention( + self, + bucket: str, + key: str, + retention: dict, + version_id: Optional[str] = None, + bypass_governance_retention: Optional[bool] = None, + ) -> None: + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api put-object-retention --bucket {bucket} --key {key} " + f"{version} --retention '{json.dumps(retention, indent=4, sort_keys=True, default=str)}' --endpoint {self.s3gate_endpoint}" + ) + if bypass_governance_retention is not None: + cmd += " --bypass-governance-retention" + _cmd_run(cmd) + + @reporter.step_deco("Put object legal hold") + def put_object_legal_hold( + self, + bucket: str, + key: str, + legal_hold_status: Literal["ON", "OFF"], + version_id: Optional[str] = None, + ) -> None: + version = f" --version-id {version_id}" if version_id else "" + legal_hold = json.dumps({"Status": legal_hold_status}) + cmd = ( + f"aws {self.common_flags} s3api put-object-legal-hold --bucket {bucket} --key {key} " + f"{version} --legal-hold '{legal_hold}' --endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Put object tagging") + def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: + tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + tagging = {"TagSet": tags} + cmd = ( + f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} " + f"--tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Get object tagging") + def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key 
{key} " + f"{version} --endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("TagSet") + + @reporter.step_deco("Delete object tagging") + def delete_object_tagging(self, bucket: str, key: str) -> None: + cmd = ( + f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " + f"--key {key} --endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Sync directory S3") + def sync( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + cmd = ( + f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " + f"--endpoint-url {self.s3gate_endpoint}" + ) + if metadata: + cmd += " --metadata" + for key, value in metadata.items(): + cmd += f" {key}={value}" + if acl: + cmd += f" --acl {acl}" + output = _cmd_run(cmd, LONG_TIMEOUT) + return self._to_json(output) + + @reporter.step_deco("CP directory S3") + def cp( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + cmd = ( + f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket} " + f"--endpoint-url {self.s3gate_endpoint} --recursive" + ) + if metadata: + cmd += " --metadata" + for key, value in metadata.items(): + cmd += f" {key}={value}" + if acl: + cmd += f" --acl {acl}" + output = _cmd_run(cmd, LONG_TIMEOUT) + return self._to_json(output) + + @reporter.step_deco("Create multipart upload S3") + def create_multipart_upload(self, bucket: str, key: str) -> str: + cmd = ( + f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " + f"--key {key} --endpoint-url {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + + assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" + + return response["UploadId"] + + @reporter.step_deco("List multipart uploads S3") + def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: + cmd = ( + f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("Uploads") + + @reporter.step_deco("Abort multipart upload S3") + def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: + cmd = ( + f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} " + f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Upload part S3") + def upload_part( + self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str + ) -> str: + cmd = ( + f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} " + f"--upload-id {upload_id} --part-number {part_num} --body {filepath} " + f"--endpoint-url {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd, LONG_TIMEOUT) + response = self._to_json(output) + assert response.get("ETag"), f"Expected ETag in response:\n{response}" + return response["ETag"] + + @reporter.step_deco("Upload copy part S3") + def upload_part_copy( + self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str + ) -> str: + cmd = ( + f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} " + f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} " + f"--endpoint-url {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd, LONG_TIMEOUT) + response = self._to_json(output) + assert 
response.get("CopyPartResult", []).get( + "ETag" + ), f"Expected ETag in response:\n{response}" + + return response["CopyPartResult"]["ETag"] + + @reporter.step_deco("List parts S3") + def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: + cmd = ( + f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " + f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + + assert response.get("Parts"), f"Expected Parts in response:\n{response}" + + return response["Parts"] + + @reporter.step_deco("Complete multipart upload S3") + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + file_path = os.path.join(os.getcwd(), ASSETS_DIR, "parts.json") + parts_dict = {"Parts": [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]} + + with open(file_path, "w") as out_file: + out_file.write(json.dumps(parts_dict)) + + logger.info(f"Input file for complete-multipart-upload: {json.dumps(parts_dict)}") + + cmd = ( + f"aws {self.common_flags} s3api complete-multipart-upload --bucket {bucket} " + f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} " + f"--endpoint-url {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Put object lock configuration") + def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: + cmd = ( + f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " + f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + return self._to_json(output) + + @reporter.step_deco("Get object lock configuration") + def get_object_lock_configuration(self, bucket: str): + cmd = ( + f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("ObjectLockConfiguration") + + @staticmethod + def _to_json(output: str) -> dict: + json_output = {} + if "{" not in output and "}" not in output: + logger.warning(f"Could not parse json from output {output}") + return json_output + + json_output = json.loads(output[output.index("{") :]) + + return json_output diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py new file mode 100644 index 0000000..07c693f --- /dev/null +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -0,0 +1,661 @@ +import json +import logging +import os +import uuid +from datetime import datetime +from functools import wraps +from time import sleep +from typing import Literal, Optional, Union + +import boto3 +import urllib3 +from botocore.config import Config +from botocore.exceptions import ClientError +from mypy_boto3_s3 import S3Client + +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.common import ( + ASSETS_DIR, + MAX_REQUEST_ATTEMPTS, + RETRY_MODE, + S3_SYNC_WAIT_TIME, +) +from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict +from frostfs_testlib.utils.cli_utils import log_command_execution + +reporter = get_reporter() +logger = logging.getLogger("NeoLogger") + +# Disable warnings on self-signed certificate which the +# boto library produces on requests to S3-gate in dev-env +urllib3.disable_warnings() + + +def report_error(func): + @wraps(func) + def deco(*a, **kw): + try: + return func(*a, **kw) + 
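As a quick illustration of the double json.dumps escaping noted in put_bucket_policy of the CLI wrapper above (the policy value here is arbitrary):

    import json

    policy = {"Version": "2012-10-17", "Statement": []}
    print(json.dumps(policy))              # {"Version": "2012-10-17", "Statement": []}
    print(json.dumps(json.dumps(policy)))  # "{\"Version\": \"2012-10-17\", \"Statement\": []}"
    # The second form is already wrapped in quotes with inner quotes escaped,
    # so it can be interpolated straight into the put-bucket-policy command line.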
except ClientError as err: + log_command_execution("Result", str(err)) + raise + + return deco + + +class Boto3ClientWrapper(S3ClientWrapper): + @reporter.step_deco("Configure S3 client (boto3)") + @report_error + def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: + session = boto3.Session() + config = Config( + retries={ + "max_attempts": MAX_REQUEST_ATTEMPTS, + "mode": RETRY_MODE, + } + ) + + self.boto3_client: S3Client = session.client( + service_name="s3", + aws_access_key_id=access_key_id, + aws_secret_access_key=secret_access_key, + config=config, + endpoint_url=s3gate_endpoint, + verify=False, + ) + + def _to_s3_param(self, param: str): + replacement_map = { + "Acl": "ACL", + "Cors": "CORS", + "_": "", + } + result = param.title() + for find, replace in replacement_map.items(): + result = result.replace(find, replace) + return result + + # BUCKET METHODS # + @reporter.step_deco("Create bucket S3") + @report_error + def create_bucket( + self, + bucket: Optional[str] = None, + object_lock_enabled_for_bucket: Optional[bool] = None, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + grant_full_control: Optional[str] = None, + location_constraint: Optional[str] = None, + ) -> str: + if bucket is None: + bucket = str(uuid.uuid4()) + + params = {"Bucket": bucket} + if object_lock_enabled_for_bucket is not None: + params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket}) + if acl is not None: + params.update({"ACL": acl}) + elif grant_write or grant_read or grant_full_control: + if grant_write: + params.update({"GrantWrite": grant_write}) + elif grant_read: + params.update({"GrantRead": grant_read}) + elif grant_full_control: + params.update({"GrantFullControl": grant_full_control}) + if location_constraint: + params.update( + {"CreateBucketConfiguration": {"LocationConstraint": location_constraint}} + ) + + s3_bucket = self.boto3_client.create_bucket(**params) + log_command_execution(f"Created S3 bucket {bucket}", s3_bucket) + sleep(S3_SYNC_WAIT_TIME) + return bucket + + @reporter.step_deco("List buckets S3") + @report_error + def list_buckets(self) -> list[str]: + found_buckets = [] + + response = self.boto3_client.list_buckets() + log_command_execution("S3 List buckets result", response) + + for bucket in response["Buckets"]: + found_buckets.append(bucket["Name"]) + + return found_buckets + + @reporter.step_deco("Delete bucket S3") + @report_error + def delete_bucket(self, bucket: str) -> None: + response = self.boto3_client.delete_bucket(Bucket=bucket) + log_command_execution("S3 Delete bucket result", response) + sleep(S3_SYNC_WAIT_TIME) + + @reporter.step_deco("Head bucket S3") + @report_error + def head_bucket(self, bucket: str) -> None: + response = self.boto3_client.head_bucket(Bucket=bucket) + log_command_execution("S3 Head bucket result", response) + + @reporter.step_deco("Put bucket versioning status") + @report_error + def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: + response = self.boto3_client.put_bucket_versioning( + Bucket=bucket, VersioningConfiguration={"Status": status.value} + ) + log_command_execution("S3 Set bucket versioning to", response) + + @reporter.step_deco("Get bucket versioning status") + @report_error + def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: + response = self.boto3_client.get_bucket_versioning(Bucket=bucket) + status = response.get("Status") + 
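The _to_s3_param helper above maps the wrapper's snake_case argument names onto boto3's PascalCase keyword names; a standalone sketch of the same transformation:

    # Rough standalone equivalent of _to_s3_param, for illustration.
    def to_s3_param(param: str) -> str:
        result = param.title()  # "version_id" -> "Version_Id"
        for find, replace in {"Acl": "ACL", "Cors": "CORS", "_": ""}.items():
            result = result.replace(find, replace)
        return result

    assert to_s3_param("version_id") == "VersionId"
    assert to_s3_param("acl") == "ACL"
    assert to_s3_param("grant_full_control") == "GrantFullControl"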
log_command_execution("S3 Got bucket versioning status", response) + return status + + @reporter.step_deco("Put bucket tagging") + @report_error + def put_bucket_tagging(self, bucket: str, tags: list) -> None: + tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + tagging = {"TagSet": tags} + response = self.boto3_client.put_bucket_tagging(Bucket=bucket, Tagging=tagging) + log_command_execution("S3 Put bucket tagging", response) + + @reporter.step_deco("Get bucket tagging") + @report_error + def get_bucket_tagging(self, bucket: str) -> list: + response = self.boto3_client.get_bucket_tagging(Bucket=bucket) + log_command_execution("S3 Get bucket tagging", response) + return response.get("TagSet") + + @reporter.step_deco("Get bucket acl") + @report_error + def get_bucket_acl(self, bucket: str) -> list: + response = self.boto3_client.get_bucket_acl(Bucket=bucket) + log_command_execution("S3 Get bucket acl", response) + return response.get("Grants") + + @reporter.step_deco("Delete bucket tagging") + @report_error + def delete_bucket_tagging(self, bucket: str) -> None: + response = self.boto3_client.delete_bucket_tagging(Bucket=bucket) + log_command_execution("S3 Delete bucket tagging", response) + + @reporter.step_deco("Put bucket ACL") + @report_error + def put_bucket_acl( + self, + bucket: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> None: + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self"] and value is not None + } + response = self.boto3_client.put_bucket_acl(**params) + log_command_execution("S3 ACL bucket result", response) + + @reporter.step_deco("Put object lock configuration") + @report_error + def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: + response = self.boto3_client.put_object_lock_configuration( + Bucket=bucket, ObjectLockConfiguration=configuration + ) + log_command_execution("S3 put_object_lock_configuration result", response) + return response + + @reporter.step_deco("Get object lock configuration") + @report_error + def get_object_lock_configuration(self, bucket: str) -> dict: + response = self.boto3_client.get_object_lock_configuration(Bucket=bucket) + log_command_execution("S3 get_object_lock_configuration result", response) + return response.get("ObjectLockConfiguration") + + @reporter.step_deco("Get bucket policy") + @report_error + def get_bucket_policy(self, bucket: str) -> str: + response = self.boto3_client.get_bucket_policy(Bucket=bucket) + log_command_execution("S3 get_bucket_policy result", response) + return response.get("Policy") + + @reporter.step_deco("Put bucket policy") + @report_error + def put_bucket_policy(self, bucket: str, policy: dict) -> None: + response = self.boto3_client.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy)) + log_command_execution("S3 put_bucket_policy result", response) + return response + + @reporter.step_deco("Get bucket cors") + @report_error + def get_bucket_cors(self, bucket: str) -> dict: + response = self.boto3_client.get_bucket_cors(Bucket=bucket) + log_command_execution("S3 get_bucket_cors result", response) + return response.get("CORSRules") + + @reporter.step_deco("Get bucket location") + @report_error + def get_bucket_location(self, bucket: str) -> str: + response = self.boto3_client.get_bucket_location(Bucket=bucket) + log_command_execution("S3 get_bucket_location result", response) + return 
response.get("LocationConstraint") + + @reporter.step_deco("Put bucket cors") + @report_error + def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: + response = self.boto3_client.put_bucket_cors( + Bucket=bucket, CORSConfiguration=cors_configuration + ) + log_command_execution("S3 put_bucket_cors result", response) + return response + + @reporter.step_deco("Delete bucket cors") + @report_error + def delete_bucket_cors(self, bucket: str) -> None: + response = self.boto3_client.delete_bucket_cors(Bucket=bucket) + log_command_execution("S3 delete_bucket_cors result", response) + + # END OF BUCKET METHODS # + # OBJECT METHODS # + + @reporter.step_deco("List objects S3 v2") + @report_error + def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + response = self.boto3_client.list_objects_v2(Bucket=bucket) + log_command_execution("S3 v2 List objects result", response) + + obj_list = [obj["Key"] for obj in response.get("Contents", [])] + logger.info(f"Found s3 objects: {obj_list}") + + return response if full_output else obj_list + + @reporter.step_deco("List objects S3") + @report_error + def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + response = self.boto3_client.list_objects(Bucket=bucket) + log_command_execution("S3 List objects result", response) + + obj_list = [obj["Key"] for obj in response.get("Contents", [])] + logger.info(f"Found s3 objects: {obj_list}") + + return response if full_output else obj_list + + @reporter.step_deco("List objects versions S3") + @report_error + def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: + response = self.boto3_client.list_object_versions(Bucket=bucket) + log_command_execution("S3 List objects versions result", response) + return response if full_output else response.get("Versions", []) + + @reporter.step_deco("List objects delete markers S3") + @report_error + def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: + response = self.boto3_client.list_object_versions(Bucket=bucket) + log_command_execution("S3 List objects delete markers result", response) + return response if full_output else response.get("DeleteMarkers", []) + + @reporter.step_deco("Put object S3") + @report_error + def put_object( + self, + bucket: str, + filepath: str, + key: Optional[str] = None, + metadata: Optional[dict] = None, + tagging: Optional[str] = None, + acl: Optional[str] = None, + object_lock_mode: Optional[str] = None, + object_lock_retain_until_date: Optional[datetime] = None, + object_lock_legal_hold_status: Optional[str] = None, + grant_full_control: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> str: + if key is None: + key = os.path.basename(filepath) + + with open(filepath, "rb") as put_file: + body = put_file.read() + + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self", "filepath", "put_file"] and value is not None + } + response = self.boto3_client.put_object(**params) + log_command_execution("S3 Put object result", response) + return response.get("VersionId") + + @reporter.step_deco("Head object S3") + @report_error + def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self"] and value is not None + } + response = self.boto3_client.head_object(**params) + log_command_execution("S3 Head 
object result", response) + return response + + @reporter.step_deco("Delete object S3") + @report_error + def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self"] and value is not None + } + response = self.boto3_client.delete_object(**params) + log_command_execution("S3 Delete object result", response) + sleep(S3_SYNC_WAIT_TIME) + return response + + @reporter.step_deco("Delete objects S3") + @report_error + def delete_objects(self, bucket: str, keys: list[str]) -> dict: + response = self.boto3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(keys)) + log_command_execution("S3 Delete objects result", response) + assert ( + "Errors" not in response + ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' + sleep(S3_SYNC_WAIT_TIME) + return response + + @reporter.step_deco("Delete object versions S3") + @report_error + def delete_object_versions(self, bucket: str, object_versions: list) -> dict: + # Build deletion list in S3 format + delete_list = { + "Objects": [ + { + "Key": object_version["Key"], + "VersionId": object_version["VersionId"], + } + for object_version in object_versions + ] + } + response = self.boto3_client.delete_objects(Bucket=bucket, Delete=delete_list) + log_command_execution("S3 Delete objects result", response) + return response + + @reporter.step_deco("Delete object versions S3 without delete markers") + @report_error + def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: + # Delete objects without creating delete markers + for object_version in object_versions: + response = self.boto3_client.delete_object( + Bucket=bucket, Key=object_version["Key"], VersionId=object_version["VersionId"] + ) + log_command_execution("S3 Delete object result", response) + + @reporter.step_deco("Put object ACL") + @report_error + def put_object_acl( + self, + bucket: str, + key: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> list: + # pytest.skip("Method put_object_acl is not supported by boto3 client") + raise NotImplementedError("Unsupported for boto3 client") + + @reporter.step_deco("Get object ACL") + @report_error + def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self"] and value is not None + } + response = self.boto3_client.get_object_acl(**params) + log_command_execution("S3 ACL objects result", response) + return response.get("Grants") + + @reporter.step_deco("Copy object S3") + @report_error + def copy_object( + self, + source_bucket: str, + source_key: str, + bucket: Optional[str] = None, + key: Optional[str] = None, + acl: Optional[str] = None, + metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None, + metadata: Optional[dict] = None, + tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None, + tagging: Optional[str] = None, + ) -> str: + if bucket is None: + bucket = source_bucket + if key is None: + key = os.path.join(os.getcwd(), str(uuid.uuid4())) + copy_source = f"{source_bucket}/{source_key}" + + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self", "source_bucket", "source_key"] and value is not 
None + } + response = self.boto3_client.copy_object(**params) + log_command_execution("S3 Copy objects result", response) + return key + + @reporter.step_deco("Get object S3") + @report_error + def get_object( + self, + bucket: str, + key: str, + version_id: Optional[str] = None, + object_range: Optional[tuple[int, int]] = None, + full_output: bool = False, + ) -> Union[dict, str]: + filename = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) + range_str = None + if object_range: + range_str = f"bytes={object_range[0]}-{object_range[1]}" + + params = { + self._to_s3_param(param): value + for param, value in {**locals(), **{"Range": range_str}}.items() + if param not in ["self", "object_range", "full_output", "range_str", "filename"] + and value is not None + } + response = self.boto3_client.get_object(**params) + log_command_execution("S3 Get objects result", response) + + with open(f"{filename}", "wb") as get_file: + chunk = response["Body"].read(1024) + while chunk: + get_file.write(chunk) + chunk = response["Body"].read(1024) + return response if full_output else filename + + @reporter.step_deco("Create multipart upload S3") + @report_error + def create_multipart_upload(self, bucket: str, key: str) -> str: + response = self.boto3_client.create_multipart_upload(Bucket=bucket, Key=key) + log_command_execution("S3 Created multipart upload", response) + assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" + + return response["UploadId"] + + @reporter.step_deco("List multipart uploads S3") + @report_error + def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: + response = self.boto3_client.list_multipart_uploads(Bucket=bucket) + log_command_execution("S3 List multipart upload", response) + + return response.get("Uploads") + + @reporter.step_deco("Abort multipart upload S3") + @report_error + def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: + response = self.boto3_client.abort_multipart_upload( + Bucket=bucket, Key=key, UploadId=upload_id + ) + log_command_execution("S3 Abort multipart upload", response) + + @reporter.step_deco("Upload part S3") + @report_error + def upload_part( + self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str + ) -> str: + with open(filepath, "rb") as put_file: + body = put_file.read() + + response = self.boto3_client.upload_part( + UploadId=upload_id, + Bucket=bucket, + Key=key, + PartNumber=part_num, + Body=body, + ) + log_command_execution("S3 Upload part", response) + assert response.get("ETag"), f"Expected ETag in response:\n{response}" + + return response["ETag"] + + @reporter.step_deco("Upload copy part S3") + @report_error + def upload_part_copy( + self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str + ) -> str: + response = self.boto3_client.upload_part_copy( + UploadId=upload_id, + Bucket=bucket, + Key=key, + PartNumber=part_num, + CopySource=copy_source, + ) + log_command_execution("S3 Upload copy part", response) + assert response.get("CopyPartResult", []).get( + "ETag" + ), f"Expected ETag in response:\n{response}" + + return response["CopyPartResult"]["ETag"] + + @reporter.step_deco("List parts S3") + @report_error + def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: + response = self.boto3_client.list_parts(UploadId=upload_id, Bucket=bucket, Key=key) + log_command_execution("S3 List part", response) + assert response.get("Parts"), f"Expected Parts in response:\n{response}" + + return response["Parts"] + 
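Taken together, the multipart methods above support the usual create / upload-part / complete sequence; a hedged end-to-end sketch (bucket name, key, and part files are illustrative):

    from frostfs_testlib.s3 import Boto3ClientWrapper

    client = Boto3ClientWrapper("<ACCESS_KEY_ID>", "<SECRET_ACCESS_KEY>", "https://s3.frostfs.devenv:8080")
    bucket = client.create_bucket()
    upload_id = client.create_multipart_upload(bucket, "big-object")

    parts = []
    for part_num, chunk_path in enumerate(["/tmp/part1.bin", "/tmp/part2.bin"], start=1):
        etag = client.upload_part(bucket, "big-object", upload_id, part_num, chunk_path)
        parts.append((part_num, etag))

    assert len(client.list_parts(bucket, "big-object", upload_id)) == len(parts)
    # complete_multipart_upload expects (part_number, etag) pairs, as in the wrapper above
    client.complete_multipart_upload(bucket, "big-object", upload_id, parts)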
+ @reporter.step_deco("Complete multipart upload S3") + @report_error + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] + response = self.boto3_client.complete_multipart_upload( + Bucket=bucket, Key=key, UploadId=upload_id, MultipartUpload={"Parts": parts} + ) + log_command_execution("S3 Complete multipart upload", response) + + @reporter.step_deco("Put object retention") + @report_error + def put_object_retention( + self, + bucket: str, + key: str, + retention: dict, + version_id: Optional[str] = None, + bypass_governance_retention: Optional[bool] = None, + ) -> None: + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self"] and value is not None + } + response = self.boto3_client.put_object_retention(**params) + log_command_execution("S3 Put object retention ", response) + + @reporter.step_deco("Put object legal hold") + @report_error + def put_object_legal_hold( + self, + bucket: str, + key: str, + legal_hold_status: Literal["ON", "OFF"], + version_id: Optional[str] = None, + ) -> None: + legal_hold = {"Status": legal_hold_status} + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self", "legal_hold_status"] and value is not None + } + response = self.boto3_client.put_object_legal_hold(**params) + log_command_execution("S3 Put object legal hold ", response) + + @reporter.step_deco("Put object tagging") + @report_error + def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: + tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + tagging = {"TagSet": tags} + response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging) + log_command_execution("S3 Put object tagging", response) + + @reporter.step_deco("Get object tagging") + @report_error + def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self"] and value is not None + } + response = self.boto3_client.get_object_tagging(**params) + log_command_execution("S3 Get object tagging", response) + return response.get("TagSet") + + @reporter.step_deco("Delete object tagging") + @report_error + def delete_object_tagging(self, bucket: str, key: str) -> None: + response = self.boto3_client.delete_object_tagging(Bucket=bucket, Key=key) + log_command_execution("S3 Delete object tagging", response) + + @reporter.step_deco("Get object attributes") + @report_error + def get_object_attributes( + self, + bucket: str, + key: str, + attributes: list[str], + version_id: Optional[str] = None, + max_parts: Optional[int] = None, + part_number: Optional[int] = None, + full_output: bool = True, + ) -> dict: + logger.warning("Method get_object_attributes is not supported by boto3 client") + return {} + + @reporter.step_deco("Sync directory S3") + @report_error + def sync( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + raise NotImplementedError("Sync is not supported for boto3 client") + + @reporter.step_deco("CP directory S3") + @report_error + def cp( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + raise NotImplementedError("Cp is not supported for boto3 client") + + # END 
OBJECT METHODS # diff --git a/src/frostfs_testlib/clients/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py similarity index 56% rename from src/frostfs_testlib/clients/s3/interfaces.py rename to src/frostfs_testlib/s3/interfaces.py index d636182..bd1379c 100644 --- a/src/frostfs_testlib/clients/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -1,11 +1,8 @@ from abc import ABC, abstractmethod from datetime import datetime +from enum import Enum from typing import Literal, Optional, Union -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum -from frostfs_testlib.utils.file_utils import TestFile - def _make_objs_dict(key_names): objs_list = [] @@ -16,60 +13,27 @@ def _make_objs_dict(key_names): return objs_dict -class VersioningStatus(HumanReadableEnum): - UNDEFINED = None +class VersioningStatus(Enum): ENABLED = "Enabled" SUSPENDED = "Suspended" -class ACL: - PRIVATE = "private" - PUBLIC_READ = "public-read" - PUBLIC_READ_WRITE = "public-read-write" - AUTHENTICATED_READ = "authenticated-read" - AWS_EXEC_READ = "aws-exec-read" - BUCKET_OWNER_READ = "bucket-owner-read" - BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control" - LOG_DELIVERY_WRITE = "log-delivery-write" +ACL_COPY = [ + "private", + "public-read", + "public-read-write", + "authenticated-read", + "aws-exec-read", + "bucket-owner-read", + "bucket-owner-full-control", +] -class BucketContainerResolver(ABC): +class S3ClientWrapper(ABC): @abstractmethod - def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str: - """ - Resolve Container ID from bucket name - - Args: - node: node from where we want to resolve - bucket_name: name of the bucket - **kwargs: any other required params - - Returns: Container ID - """ - raise NotImplementedError("Call from abstract class") - - -class S3ClientWrapper(HumanReadableABC): - access_key_id: str - secret_access_key: str - profile: str - region: str - - s3gate_endpoint: str - iam_endpoint: str - - @abstractmethod - def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None: + def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: pass - @abstractmethod - def set_endpoint(self, s3gate_endpoint: str): - """Set endpoint""" - - @abstractmethod - def set_iam_endpoint(self, iam_endpoint: str): - """Set iam endpoint""" - @abstractmethod def create_bucket( self, @@ -136,7 +100,7 @@ class S3ClientWrapper(HumanReadableABC): """Deletes the tags from the bucket.""" @abstractmethod - def get_bucket_acl(self, bucket: str) -> dict: + def get_bucket_acl(self, bucket: str) -> list: """This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket.""" @abstractmethod @@ -165,10 +129,6 @@ class S3ClientWrapper(HumanReadableABC): def get_bucket_policy(self, bucket: str) -> str: """Returns the policy of a specified bucket.""" - @abstractmethod - def delete_bucket_policy(self, bucket: str) -> str: - """Deletes the policy of a specified bucket.""" - @abstractmethod def put_bucket_policy(self, bucket: str, policy: dict) -> None: """Applies S3 bucket policy to an S3 bucket.""" @@ -203,9 +163,7 @@ class S3ClientWrapper(HumanReadableABC): """ @abstractmethod - def list_objects( - self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None - ) -> Union[dict, list[str]]: + def list_objects(self, bucket: 
str, full_output: bool = False) -> Union[dict, list[str]]: """Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application @@ -304,7 +262,7 @@ class S3ClientWrapper(HumanReadableABC): version_id: Optional[str] = None, object_range: Optional[tuple[int, int]] = None, full_output: bool = False, - ) -> dict | TestFile: + ) -> Union[dict, str]: """Retrieves objects from S3.""" @abstractmethod @@ -332,11 +290,15 @@ class S3ClientWrapper(HumanReadableABC): abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.""" @abstractmethod - def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: + def upload_part( + self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str + ) -> str: """Uploads a part in a multipart upload.""" @abstractmethod - def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: + def upload_part_copy( + self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str + ) -> str: """Uploads a part by copying data from an existing object as data source.""" @abstractmethod @@ -344,7 +306,7 @@ class S3ClientWrapper(HumanReadableABC): """Lists the parts that have been uploaded for a specific multipart upload.""" @abstractmethod - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: """Completes a multipart upload by assembling previously uploaded parts.""" @abstractmethod @@ -380,18 +342,6 @@ class S3ClientWrapper(HumanReadableABC): def delete_object_tagging(self, bucket: str, key: str) -> None: """Removes the entire tag set from the specified object.""" - @abstractmethod - def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: - """Adds or updates bucket lifecycle configuration""" - - @abstractmethod - def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: - """Gets bucket lifecycle configuration""" - - @abstractmethod - def delete_bucket_lifecycle(self, bucket: str) -> dict: - """Deletes bucket lifecycle""" - @abstractmethod def get_object_attributes( self, @@ -426,194 +376,3 @@ class S3ClientWrapper(HumanReadableABC): """cp directory TODO: Add proper description""" # END OF OBJECT METHODS # - - # IAM METHODS # - - @abstractmethod - def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: - """Adds the specified user to the specified group""" - - @abstractmethod - def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: - """Attaches the specified managed policy to the specified IAM group""" - - @abstractmethod - def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: - """Attaches the specified managed policy to the specified user""" - - @abstractmethod - def iam_create_access_key(self, user_name: str) -> dict: - """Creates a new AWS secret access key and access key ID for the specified user""" - - @abstractmethod - def iam_create_group(self, group_name: str) -> dict: - """Creates a new group""" - - @abstractmethod - def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: - """Creates a new managed policy 
for your AWS account""" - - @abstractmethod - def iam_create_user(self, user_name: str) -> dict: - """Creates a new IAM user for your AWS account""" - - @abstractmethod - def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: - """Deletes the access key pair associated with the specified IAM user""" - - @abstractmethod - def iam_delete_group(self, group_name: str) -> dict: - """Deletes the specified IAM group""" - - @abstractmethod - def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: - """Deletes the specified inline policy that is embedded in the specified IAM group""" - - @abstractmethod - def iam_delete_policy(self, policy_arn: str) -> dict: - """Deletes the specified managed policy""" - - @abstractmethod - def iam_delete_user(self, user_name: str) -> dict: - """Deletes the specified IAM user""" - - @abstractmethod - def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: - """Deletes the specified inline policy that is embedded in the specified IAM user""" - - @abstractmethod - def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: - """Removes the specified managed policy from the specified IAM group""" - - @abstractmethod - def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: - """Removes the specified managed policy from the specified user""" - - @abstractmethod - def iam_get_group(self, group_name: str) -> dict: - """Returns a list of IAM users that are in the specified IAM group""" - - @abstractmethod - def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: - """Retrieves the specified inline policy document that is embedded in the specified IAM group""" - - @abstractmethod - def iam_get_policy(self, policy_arn: str) -> dict: - """Retrieves information about the specified managed policy""" - - @abstractmethod - def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: - """Retrieves information about the specified version of the specified managed policy""" - - @abstractmethod - def iam_get_user(self, user_name: str) -> dict: - """Retrieves information about the specified IAM user""" - - @abstractmethod - def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: - """Retrieves the specified inline policy document that is embedded in the specified IAM user""" - - @abstractmethod - def iam_list_access_keys(self, user_name: str) -> dict: - """Returns information about the access key IDs associated with the specified IAM user""" - - @abstractmethod - def iam_list_attached_group_policies(self, group_name: str) -> dict: - """Lists all managed policies that are attached to the specified IAM group""" - - @abstractmethod - def iam_list_attached_user_policies(self, user_name: str) -> dict: - """Lists all managed policies that are attached to the specified IAM user""" - - @abstractmethod - def iam_list_entities_for_policy(self, policy_arn: str) -> dict: - """Lists all IAM users, groups, and roles that the specified managed policy is attached to""" - - @abstractmethod - def iam_list_group_policies(self, group_name: str) -> dict: - """Lists the names of the inline policies that are embedded in the specified IAM group""" - - @abstractmethod - def iam_list_groups(self) -> dict: - """Lists the IAM groups""" - - @abstractmethod - def iam_list_groups_for_user(self, user_name: str) -> dict: - """Lists the IAM groups that the specified IAM user belongs to""" - - @abstractmethod - def iam_list_policies(self) -> dict: - """Lists all the 
managed policies that are available in your AWS account""" - - @abstractmethod - def iam_list_policy_versions(self, policy_arn: str) -> dict: - """Lists information about the versions of the specified managed policy""" - - @abstractmethod - def iam_list_user_policies(self, user_name: str) -> dict: - """Lists the names of the inline policies embedded in the specified IAM user""" - - @abstractmethod - def iam_list_users(self) -> dict: - """Lists the IAM users""" - - @abstractmethod - def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: - """Adds or updates an inline policy document that is embedded in the specified IAM group""" - - @abstractmethod - def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: - """Adds or updates an inline policy document that is embedded in the specified IAM user""" - - @abstractmethod - def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: - """Removes the specified user from the specified group""" - - @abstractmethod - def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: - """Updates the name and/or the path of the specified IAM group""" - - @abstractmethod - def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: - """Updates the name and/or the path of the specified IAM user""" - - @abstractmethod - def iam_tag_user(self, user_name: str, tags: list) -> dict: - """Adds one or more tags to an IAM user""" - - @abstractmethod - def iam_list_user_tags(self, user_name: str) -> dict: - """List tags of IAM user""" - - @abstractmethod - def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: - """Removes the specified tags from the user""" - - # MFA methods - @abstractmethod - def iam_create_virtual_mfa_device( - self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None - ) -> tuple: - """Creates a new virtual MFA device""" - - @abstractmethod - def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: - """Deactivates the specified MFA device and removes it from association with the user name""" - - @abstractmethod - def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: - """Deletes a virtual MFA device""" - - @abstractmethod - def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: - """Enables the specified MFA device and associates it with the specified IAM user""" - - @abstractmethod - def iam_list_virtual_mfa_devices(self) -> dict: - """Lists the MFA devices for an IAM user""" - - @abstractmethod - def sts_get_session_token( - self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None - ) -> tuple: - """Get session token for user""" diff --git a/src/frostfs_testlib/shell/__init__.py b/src/frostfs_testlib/shell/__init__.py index 980d119..0300ff8 100644 --- a/src/frostfs_testlib/shell/__init__.py +++ b/src/frostfs_testlib/shell/__init__.py @@ -1,3 +1,3 @@ from frostfs_testlib.shell.interfaces import CommandOptions, CommandResult, InteractiveInput, Shell from frostfs_testlib.shell.local_shell import LocalShell -from frostfs_testlib.shell.ssh_shell import SshConnectionProvider, SSHShell +from frostfs_testlib.shell.ssh_shell import SSHShell diff --git a/src/frostfs_testlib/shell/command_inspectors.py 
b/src/frostfs_testlib/shell/command_inspectors.py index 8fe2f34..8486f43 100644 --- a/src/frostfs_testlib/shell/command_inspectors.py +++ b/src/frostfs_testlib/shell/command_inspectors.py @@ -7,23 +7,7 @@ class SudoInspector(CommandInspector): If command is already prepended with sudo, then has no effect. """ - def inspect(self, original_command: str, command: str) -> str: + def inspect(self, command: str) -> str: if not command.startswith("sudo"): return f"sudo {command}" return command - - -class SuInspector(CommandInspector): - """Allows to run command as another user via sudo su call - - If command is already prepended with sudo su, then has no effect. - """ - - def __init__(self, user: str) -> None: - self.user = user - - def inspect(self, original_command: str, command: str) -> str: - if not original_command.startswith("sudo su"): - cmd = original_command.replace('"', '\\"').replace("\$", "\\\\\\$") - return f'sudo su - {self.user} -c "{cmd}"' - return original_command diff --git a/src/frostfs_testlib/shell/interfaces.py b/src/frostfs_testlib/shell/interfaces.py index a8d3325..219bc7c 100644 --- a/src/frostfs_testlib/shell/interfaces.py +++ b/src/frostfs_testlib/shell/interfaces.py @@ -22,12 +22,11 @@ class CommandInspector(ABC): """Interface of inspector that processes command text before execution.""" @abstractmethod - def inspect(self, original_command: str, command: str) -> str: + def inspect(self, command: str) -> str: """Transforms command text and returns modified command. Args: command: Command to transform with this inspector. - original_command: Untransformed command to transform with this inspector. Depending on type of the inspector it might be required to modify original command Returns: Transformed command text. @@ -48,7 +47,6 @@ class CommandOptions: check: Controls whether to check return code of the command. Set to False to ignore non-zero return codes. no_log: Do not print output to logger if True. 
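For orientation, a minimal sketch of how a command inspector might be wired in under the single-argument `inspect(command)` signature kept by this hunk: the shell runs every registered inspector over the command text before executing it, and `SudoInspector` simply prepends `sudo` when it is missing. This assumes `LocalShell` exposes a `command_inspectors` list the same way `SSHShell` does; the command itself is only a placeholder.

    from frostfs_testlib.shell import CommandOptions
    from frostfs_testlib.shell.command_inspectors import SudoInspector
    from frostfs_testlib.shell.local_shell import LocalShell

    shell = LocalShell()
    # Assumption: inspectors can be appended to the shell's list directly;
    # every exec() call then rewrites the command through them in order.
    shell.command_inspectors.append(SudoInspector())

    # check=False keeps a non-zero return code from raising RuntimeError.
    result = shell.exec("systemctl status frostfs-storage", CommandOptions(check=False))
    print(result.return_code, result.stdout)
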
- extra_inspectors: Exctra command inspectors to process command """ interactive_inputs: Optional[list[InteractiveInput]] = None @@ -56,7 +54,6 @@ class CommandOptions: timeout: Optional[int] = None check: bool = True no_log: bool = False - extra_inspectors: Optional[list[CommandInspector]] = None def __post_init__(self): if self.timeout is None: diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index 746070f..12f450a 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -1,18 +1,16 @@ import logging import subprocess import tempfile -from contextlib import nullcontext from datetime import datetime from typing import IO, Optional import pexpect -from frostfs_testlib import reporter -from frostfs_testlib.resources.common import MORE_LOG +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell logger = logging.getLogger("frostfs.testlib.shell") -step_context = reporter.step if MORE_LOG == "1" else nullcontext +reporter = get_reporter() class LocalShell(Shell): @@ -26,22 +24,20 @@ class LocalShell(Shell): # If no options were provided, use default options options = options or CommandOptions() - original_command = command - extra_inspectors = options.extra_inspectors if options.extra_inspectors else [] - for inspector in [*self.command_inspectors, *extra_inspectors]: - command = inspector.inspect(original_command, command) + for inspector in self.command_inspectors: + command = inspector.inspect(command) - with step_context(f"Executing command: {command}"): - if options.interactive_inputs: - return self._exec_interactive(command, options) - return self._exec_non_interactive(command, options) + logger.info(f"Executing command: {command}") + if options.interactive_inputs: + return self._exec_interactive(command, options) + return self._exec_non_interactive(command, options) def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: start_time = datetime.utcnow() log_file = tempfile.TemporaryFile() # File is reliable cross-platform way to capture output try: - command_process = pexpect.spawn(command, timeout=options.timeout, use_poll=True) + command_process = pexpect.spawn(command, timeout=options.timeout) except (pexpect.ExceptionPexpect, OSError) as exc: raise RuntimeError(f"Command: {command}") from exc @@ -63,7 +59,8 @@ class LocalShell(Shell): if options.check and result.return_code != 0: raise RuntimeError( - f"Command: {command}\nreturn code: {result.return_code}\n" f"Output: {result.stdout}\n" f"Stderr: {result.stderr}\n" + f"Command: {command}\nreturn code: {result.return_code}\n" + f"Output: {result.stdout}" ) return result @@ -94,7 +91,11 @@ class LocalShell(Shell): stderr="", return_code=exc.returncode, ) - raise RuntimeError(f"Command: {command}\nError with retcode: {exc.returncode}\n Output: {exc.output}") from exc + raise RuntimeError( + f"Command: {command}\nError:\n" + f"return code: {exc.returncode}\n" + f"output: {exc.output}" + ) from exc except OSError as exc: raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc finally: @@ -128,19 +129,22 @@ class LocalShell(Shell): end_time: datetime, result: Optional[CommandResult], ) -> None: - if not result: - logger.warning(f"Command: {command}\n" f"Error: result is None") - return - - status, log_method = ("Success", logger.info) if result.return_code == 0 else ("Error", logger.warning) - 
log_method(f"Command: {command}\n" f"{status} with retcode {result.return_code}\n" f"Output: \n{result.stdout}") - - elapsed_time = end_time - start_time - command_attachment = ( - f"COMMAND: {command}\n" - f"RETCODE: {result.return_code}\n\n" - f"STDOUT:\n{result.stdout}\n" - f"STDERR:\n{result.stderr}\n" - f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" + # TODO: increase logging level if return code is non 0, should be warning at least + logger.info( + f"Command: {command}\n" + f"{'Success:' if result and result.return_code == 0 else 'Error:'}\n" + f"return code: {result.return_code if result else ''} " + f"\nOutput: {result.stdout if result else ''}" ) - reporter.attach(command_attachment, "Command execution.txt") + + if result: + elapsed_time = end_time - start_time + command_attachment = ( + f"COMMAND: {command}\n" + f"RETCODE: {result.return_code}\n\n" + f"STDOUT:\n{result.stdout}\n" + f"STDERR:\n{result.stderr}\n" + f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" + ) + with reporter.step(f"COMMAND: {command}"): + reporter.attach(command_attachment, "Command execution.txt") diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index e718b4d..6ef3dfb 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -6,111 +6,24 @@ from functools import lru_cache, wraps from time import sleep from typing import ClassVar, Optional, Tuple -from paramiko import AutoAddPolicy, Channel, ECDSAKey, Ed25519Key, PKey, RSAKey, SSHClient, SSHException, ssh_exception +from paramiko import ( + AutoAddPolicy, + Channel, + ECDSAKey, + Ed25519Key, + PKey, + RSAKey, + SSHClient, + SSHException, + ssh_exception, +) from paramiko.ssh_exception import AuthenticationException -from frostfs_testlib import reporter -from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell, SshCredentials +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell logger = logging.getLogger("frostfs.testlib.shell") - - -class SshConnectionProvider: - SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 4 - SSH_ATTEMPTS_INTERVAL: ClassVar[int] = 10 - CONNECTION_TIMEOUT = 60 - - instance = None - connections: dict[str, SSHClient] = {} - creds: dict[str, SshCredentials] = {} - - def __new__(cls): - if not cls.instance: - cls.instance = super(SshConnectionProvider, cls).__new__(cls) - return cls.instance - - def store_creds(self, host: str, ssh_creds: SshCredentials): - self.creds[host] = ssh_creds - - def provide(self, host: str, port: str) -> SSHClient: - if host not in self.creds: - raise RuntimeError(f"Please add credentials for host {host}") - - if host in self.connections: - client = self.connections[host] - if client: - return client - - creds = self.creds[host] - client = self._create_connection(host, port, creds) - self.connections[host] = client - return client - - def drop(self, host: str): - if host in self.connections: - client = self.connections.pop(host) - client.close() - - def drop_all(self): - hosts = list(self.connections.keys()) - for host in hosts: - self.drop(host) - - def _create_connection( - self, - host: str, - port: str, - creds: SshCredentials, - ) -> SSHClient: - for attempt in range(self.SSH_CONNECTION_ATTEMPTS): - connection = SSHClient() - connection.set_missing_host_key_policy(AutoAddPolicy()) - try: - if creds.ssh_key_path: - 
logger.info( - f"Trying to connect to host {host} as {creds.ssh_login} using SSH key " - f"{creds.ssh_key_path} (attempt {attempt})" - ) - connection.connect( - hostname=host, - port=port, - username=creds.ssh_login, - pkey=_load_private_key(creds.ssh_key_path, creds.ssh_key_passphrase), - timeout=self.CONNECTION_TIMEOUT, - ) - else: - logger.info( - f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})" - ) - connection.connect( - hostname=host, - port=port, - username=creds.ssh_login, - password=creds.ssh_password, - timeout=self.CONNECTION_TIMEOUT, - ) - return connection - except AuthenticationException: - connection.close() - logger.exception(f"Can't connect to host {host}") - raise - except ( - SSHException, - ssh_exception.NoValidConnectionsError, - AttributeError, - socket.timeout, - OSError, - ) as exc: - connection.close() - can_retry = attempt + 1 < self.SSH_CONNECTION_ATTEMPTS - if can_retry: - logger.warn( - f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}" - ) - sleep(self.SSH_ATTEMPTS_INTERVAL) - continue - logger.exception(f"Can't connect to host {host}") - raise HostIsNotAvailable(host) from exc +reporter = get_reporter() class HostIsNotAvailable(Exception): @@ -123,7 +36,9 @@ class HostIsNotAvailable(Exception): def log_command(func): @wraps(func) - def wrapper(shell: "SSHShell", command: str, options: CommandOptions, *args, **kwargs) -> CommandResult: + def wrapper( + shell: "SSHShell", command: str, options: CommandOptions, *args, **kwargs + ) -> CommandResult: command_info = command.removeprefix("$ProgressPreference='SilentlyContinue'\n") with reporter.step(command_info): logger.info(f'Execute command "{command}" on "{shell.host}"') @@ -176,6 +91,9 @@ class SSHShell(Shell): # to allow remote command to flush its output buffer DELAY_AFTER_EXIT = 0.2 + SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 3 + CONNECTION_TIMEOUT = 90 + def __init__( self, host: str, @@ -185,34 +103,31 @@ class SSHShell(Shell): private_key_passphrase: Optional[str] = None, port: str = "22", command_inspectors: Optional[list[CommandInspector]] = None, - custom_environment: Optional[dict] = None ) -> None: super().__init__() - self.connection_provider = SshConnectionProvider() - self.connection_provider.store_creds( - host, SshCredentials(login, password, private_key_path, private_key_passphrase) - ) self.host = host self.port = port - + self.login = login + self.password = password + self.private_key_path = private_key_path + self.private_key_passphrase = private_key_passphrase self.command_inspectors = command_inspectors or [] - - self.environment = custom_environment + self.__connection: Optional[SSHClient] = None @property def _connection(self): - return self.connection_provider.provide(self.host, self.port) + if not self.__connection: + self.__connection = self._create_connection() + return self.__connection def drop(self): - self.connection_provider.drop(self.host) + self._reset_connection() def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult: options = options or CommandOptions() - original_command = command - extra_inspectors = options.extra_inspectors if options.extra_inspectors else [] - for inspector in [*self.command_inspectors, *extra_inspectors]: - command = inspector.inspect(original_command, command) + for inspector in self.command_inspectors: + command = inspector.inspect(command) if options.interactive_inputs: result = self._exec_interactive(command, options) @@ 
-221,13 +136,15 @@ class SSHShell(Shell): if options.check and result.return_code != 0: raise RuntimeError( - f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n" + f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}" ) return result @log_command def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: - stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, get_pty=True, environment=self.environment) + stdin, stdout, stderr = self._connection.exec_command( + command, timeout=options.timeout, get_pty=True + ) for interactive_input in options.interactive_inputs: input = interactive_input.input if not input.endswith("\n"): @@ -254,7 +171,7 @@ class SSHShell(Shell): @log_command def _exec_non_interactive(self, command: str, options: CommandOptions) -> CommandResult: try: - stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, environment=self.environment) + stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout) if options.close_stdin: stdin.close() @@ -276,7 +193,7 @@ class SSHShell(Shell): socket.timeout, ) as exc: logger.exception(f"Can't execute command {command} on host: {self.host}") - self.drop() + self._reset_connection() raise HostIsNotAvailable(self.host) from exc def _read_channels( @@ -331,3 +248,57 @@ class SSHShell(Shell): full_stderr = b"".join(stderr_chunks) return (full_stdout.decode(errors="ignore"), full_stderr.decode(errors="ignore")) + + def _create_connection(self, attempts: int = SSH_CONNECTION_ATTEMPTS) -> SSHClient: + for attempt in range(attempts): + connection = SSHClient() + connection.set_missing_host_key_policy(AutoAddPolicy()) + try: + if self.private_key_path: + logger.info( + f"Trying to connect to host {self.host} as {self.login} using SSH key " + f"{self.private_key_path} (attempt {attempt})" + ) + connection.connect( + hostname=self.host, + port=self.port, + username=self.login, + pkey=_load_private_key(self.private_key_path, self.private_key_passphrase), + timeout=self.CONNECTION_TIMEOUT, + ) + else: + logger.info( + f"Trying to connect to host {self.host} as {self.login} using password " + f"(attempt {attempt})" + ) + connection.connect( + hostname=self.host, + port=self.port, + username=self.login, + password=self.password, + timeout=self.CONNECTION_TIMEOUT, + ) + return connection + except AuthenticationException: + connection.close() + logger.exception(f"Can't connect to host {self.host}") + raise + except ( + SSHException, + ssh_exception.NoValidConnectionsError, + AttributeError, + socket.timeout, + OSError, + ) as exc: + connection.close() + can_retry = attempt + 1 < attempts + if can_retry: + logger.warn(f"Can't connect to host {self.host}, will retry. 
Error: {exc}") + continue + logger.exception(f"Can't connect to host {self.host}") + raise HostIsNotAvailable(self.host) from exc + + def _reset_connection(self) -> None: + if self.__connection: + self.__connection.close() + self.__connection = None diff --git a/src/frostfs_testlib/steps/acl.py b/src/frostfs_testlib/steps/acl.py index da407b6..0ef101b 100644 --- a/src/frostfs_testlib/steps/acl.py +++ b/src/frostfs_testlib/steps/acl.py @@ -8,23 +8,29 @@ from typing import List, Optional, Union import base58 -from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC -from frostfs_testlib.resources.common import ASSETS_DIR +from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.dataclasses.acl import EACL_LIFETIME, FROSTFS_CONTRACT_CACHE_TIMEOUT, EACLPubKey, EACLRole, EACLRule -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.storage.dataclasses.acl import ( + EACL_LIFETIME, + FROSTFS_CONTRACT_CACHE_TIMEOUT, + EACLPubKey, + EACLRole, + EACLRule, +) from frostfs_testlib.utils import wallet_utils +reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step("Get extended ACL") -def get_eacl(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str) -> Optional[str]: - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) +@reporter.step_deco("Get extended ACL") +def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]: + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) try: - result = cli.container.get_eacl(rpc_endpoint=endpoint, cid=cid) + result = cli.container.get_eacl(wallet=wallet_path, rpc_endpoint=endpoint, cid=cid) except RuntimeError as exc: logger.info("Extended ACL table is not set for this container") logger.info(f"Got exception while getting eacl: {exc}") @@ -34,17 +40,18 @@ def get_eacl(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str) -> Optio return result.stdout -@reporter.step("Set extended ACL") +@reporter.step_deco("Set extended ACL") def set_eacl( - wallet: WalletInfo, + wallet_path: str, cid: str, eacl_table_path: str, shell: Shell, endpoint: str, session_token: Optional[str] = None, ) -> None: - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) cli.container.set_eacl( + wallet=wallet_path, rpc_endpoint=endpoint, cid=cid, table=eacl_table_path, @@ -60,7 +67,7 @@ def _encode_cid_for_eacl(cid: str) -> str: def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str: table_file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"eacl_table_{str(uuid.uuid4())}.json") - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) cli.acl.extended_create(cid=cid, out=table_file_path, rule=rules_list) with open(table_file_path, "r") as file: @@ -71,7 +78,7 @@ def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str: def form_bearertoken_file( - wallet: WalletInfo, + wif: str, cid: str, eacl_rule_list: List[Union[EACLRule, EACLPubKey]], shell: Shell, @@ -86,7 +93,7 @@ def form_bearertoken_file( enc_cid = _encode_cid_for_eacl(cid) if cid else None file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - eacl = get_eacl(wallet, cid, shell, endpoint) + eacl = get_eacl(wif, 
cid, shell, endpoint) json_eacl = dict() if eacl: eacl = eacl.replace("eACL: ", "").split("Signature")[0] @@ -127,7 +134,7 @@ def form_bearertoken_file( if sign: sign_bearer( shell=shell, - wallet=wallet, + wallet_path=wif, eacl_rules_file_from=file_path, eacl_rules_file_to=file_path, json=True, @@ -158,19 +165,27 @@ def eacl_rules(access: str, verbs: list, user: str) -> list[str]: return rules -def sign_bearer(shell: Shell, wallet: WalletInfo, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None: - frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - frostfscli.util.sign_bearer_token(eacl_rules_file_from, eacl_rules_file_to, json=json) +def sign_bearer( + shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool +) -> None: + frostfscli = FrostfsCli( + shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG + ) + frostfscli.util.sign_bearer_token( + wallet=wallet_path, from_file=eacl_rules_file_from, to_file=eacl_rules_file_to, json=json + ) -@reporter.step("Wait for eACL cache expired") +@reporter.step_deco("Wait for eACL cache expired") def wait_for_cache_expired(): sleep(FROSTFS_CONTRACT_CACHE_TIMEOUT) return -@reporter.step("Return bearer token in base64 to caller") -def bearer_token_base64_from_file(bearer_path: str) -> str: +@reporter.step_deco("Return bearer token in base64 to caller") +def bearer_token_base64_from_file( + bearer_path: str, +) -> str: with open(bearer_path, "rb") as file: signed = file.read() return base64.b64encode(signed).decode("utf-8") diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index 092b1a3..89070c4 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -1,28 +1,29 @@ import json import logging -import re from dataclasses import dataclass from time import sleep from typing import Optional, Union -from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC +from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node -from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.cluster import Cluster from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.utils import json_utils from frostfs_testlib.utils.file_utils import generate_file, get_file_hash +reporter = get_reporter() logger = logging.getLogger("NeoLogger") @dataclass class StorageContainerInfo: id: str - wallet: WalletInfo + wallet_file: WalletInfo class StorageContainer: @@ -39,10 +40,13 @@ class StorageContainer: def get_id(self) -> str: return self.storage_container_info.id - def get_wallet(self) -> str: - return self.storage_container_info.wallet + def get_wallet_path(self) -> str: + return self.storage_container_info.wallet_file.path - @reporter.step("Generate new object and put in container") + def get_wallet_config_path(self) -> str: + return self.storage_container_info.wallet_file.config_path + + @reporter.step_deco("Generate new object and put in container") def generate_object( self, size: int, @@ -55,34 +59,37 @@ class StorageContainer: file_hash = 
get_file_hash(file_path) container_id = self.get_id() - wallet = self.get_wallet() + wallet_path = self.get_wallet_path() + wallet_config = self.get_wallet_config_path() with reporter.step(f"Put object with size {size} to container {container_id}"): if endpoint: object_id = put_object( - wallet=wallet, + wallet=wallet_path, path=file_path, cid=container_id, expire_at=expire_at, shell=self.shell, endpoint=endpoint, bearer=bearer_token, + wallet_config=wallet_config, ) else: object_id = put_object_to_random_node( - wallet=wallet, + wallet=wallet_path, path=file_path, cid=container_id, expire_at=expire_at, shell=self.shell, cluster=self.cluster, bearer=bearer_token, + wallet_config=wallet_config, ) storage_object = StorageObjectInfo( container_id, object_id, size=size, - wallet=wallet, + wallet_file_path=wallet_path, file_path=file_path, file_hash=file_hash, ) @@ -93,31 +100,29 @@ class StorageContainer: DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" -DEFAULT_EC_PLACEMENT_RULE = "EC 3.1" -@reporter.step("Create Container") +@reporter.step_deco("Create Container") def create_container( - wallet: WalletInfo, + wallet: str, shell: Shell, endpoint: str, rule: str = DEFAULT_PLACEMENT_RULE, basic_acl: str = "", attributes: Optional[dict] = None, session_token: str = "", + session_wallet: str = "", name: Optional[str] = None, options: Optional[dict] = None, await_mode: bool = True, wait_for_creation: bool = True, - nns_zone: Optional[str] = None, - nns_name: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> str: """ A wrapper for `frostfs-cli container create` call. Args: - wallet (WalletInfo): a wallet on whose behalf a container is created + wallet (str): a wallet on whose behalf a container is created rule (optional, str): placement rule for container basic_acl (optional, str): an ACL for container, will be appended to `--basic-acl` key @@ -139,12 +144,11 @@ def create_container( (str): CID of the created container """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) result = cli.container.create( rpc_endpoint=endpoint, + wallet=session_wallet if session_wallet else wallet, policy=rule, - nns_name=nns_name, - nns_zone=nns_zone, basic_acl=basic_acl, attributes=attributes, name=name, @@ -164,17 +168,23 @@ def create_container( return cid -def wait_for_container_creation(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1): +def wait_for_container_creation( + wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1 +): for _ in range(attempts): containers = list_containers(wallet, shell, endpoint) if cid in containers: return logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue") sleep(sleep_interval) - raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting") + raise RuntimeError( + f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting" + ) -def wait_for_container_deletion(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1): +def wait_for_container_deletion( + wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1 +): for _ in 
range(attempts): try: get_container(wallet, cid, shell=shell, endpoint=endpoint) @@ -187,27 +197,30 @@ def wait_for_container_deletion(wallet: WalletInfo, cid: str, shell: Shell, endp raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.") -@reporter.step("List Containers") -def list_containers(wallet: WalletInfo, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT) -> list[str]: +@reporter.step_deco("List Containers") +def list_containers( + wallet: str, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT +) -> list[str]: """ A wrapper for `frostfs-cli container list` call. It returns all the available containers for the given wallet. Args: - wallet (WalletInfo): a wallet on whose behalf we list the containers + wallet (str): a wallet on whose behalf we list the containers shell: executor for cli command endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key timeout: Timeout for the operation. Returns: (list): list of containers """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - result = cli.container.list(rpc_endpoint=endpoint, timeout=timeout) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + result = cli.container.list(rpc_endpoint=endpoint, wallet=wallet, timeout=timeout) + logger.info(f"Containers: \n{result}") return result.stdout.split() -@reporter.step("List Objects in container") +@reporter.step_deco("List Objects in container") def list_objects( - wallet: WalletInfo, + wallet: str, shell: Shell, container_id: str, endpoint: str, @@ -217,7 +230,7 @@ def list_objects( A wrapper for `frostfs-cli container list-objects` call. It returns all the available objects in container. Args: - wallet (WalletInfo): a wallet on whose behalf we list the containers objects + wallet (str): a wallet on whose behalf we list the containers objects shell: executor for cli command container_id: cid of container endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key @@ -225,15 +238,17 @@ def list_objects( Returns: (list): list of containers """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - result = cli.container.list_objects(rpc_endpoint=endpoint, cid=container_id, timeout=timeout) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + result = cli.container.list_objects( + rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout + ) logger.info(f"Container objects: \n{result}") return result.stdout.split() -@reporter.step("Get Container") +@reporter.step_deco("Get Container") def get_container( - wallet: WalletInfo, + wallet: str, cid: str, shell: Shell, endpoint: str, @@ -244,7 +259,7 @@ def get_container( A wrapper for `frostfs-cli container get` call. It extracts container's attributes and rearranges them into a more compact view. 
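Taken together, the container helpers in this file now accept a plain wallet path and fall back to the shared `DEFAULT_WALLET_CONFIG`. A minimal usage sketch under the signatures shown in these hunks; the shell, wallet path and RPC endpoint below are placeholders:

    from frostfs_testlib.shell import LocalShell
    from frostfs_testlib.steps.cli.container import create_container, delete_container, get_container

    shell = LocalShell()
    wallet_path = "/path/to/wallet.json"   # placeholder
    endpoint = "storage01.example:8080"    # placeholder RPC endpoint

    # Create a container with the default placement rule and wait for it to persist.
    cid = create_container(wallet_path, shell=shell, endpoint=endpoint)

    # json_mode=True returns the parsed attribute dict instead of raw CLI output.
    info = get_container(wallet_path, cid, shell=shell, endpoint=endpoint, json_mode=True)
    print(info.get("attributes", {}))

    delete_container(wallet_path, cid, shell=shell, endpoint=endpoint, await_mode=True)
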
Args: - wallet (WalletInfo): path to a wallet on whose behalf we get the container + wallet (str): path to a wallet on whose behalf we get the container cid (str): ID of the container to get shell: executor for cli command endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key @@ -254,8 +269,10 @@ def get_container( (dict, str): dict of container attributes """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - result = cli.container.get(rpc_endpoint=endpoint, cid=cid, json_mode=json_mode, timeout=timeout) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + result = cli.container.get( + rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout + ) if not json_mode: return result.stdout @@ -269,37 +286,40 @@ def get_container( return container_info -@reporter.step("Delete Container") +@reporter.step_deco("Delete Container") # TODO: make the error message about a non-found container more user-friendly def delete_container( - wallet: WalletInfo, + wallet: str, cid: str, shell: Shell, endpoint: str, force: bool = False, session_token: Optional[str] = None, await_mode: bool = False, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> None: """ A wrapper for `frostfs-cli container delete` call. Args: - await_mode: Block execution until container is removed. - wallet (WalletInfo): path to a wallet on whose behalf we delete the container + wallet (str): path to a wallet on whose behalf we delete the container cid (str): ID of the container to delete shell: executor for cli command endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key force (bool): do not check whether container contains locks and remove immediately session_token: a path to session token file + timeout: Timeout for the operation. This function doesn't return anything. 
""" - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) cli.container.delete( + wallet=wallet, cid=cid, rpc_endpoint=endpoint, force=force, session=session_token, await_mode=await_mode, + timeout=timeout, ) @@ -329,23 +349,11 @@ def _parse_cid(output: str) -> str: return splitted[1] -@reporter.step("Search for nodes with a container") -def search_nodes_with_container( - wallet: WalletInfo, - cid: str, - shell: Shell, - endpoint: str, - cluster: Cluster, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> list[ClusterNode]: - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - result = cli.container.search_node(rpc_endpoint=endpoint, cid=cid, timeout=timeout) - - pattern = r"[0-9]+(?:\.[0-9]+){3}" - nodes_ip = list(set(re.findall(pattern, result.stdout))) - - with reporter.step(f"nodes ips = {nodes_ip}"): - nodes_list = cluster.get_nodes_by_ip(nodes_ip) - - with reporter.step(f"Return nodes - {nodes_list}"): - return nodes_list +@reporter.step_deco("Search container by name") +def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str): + list_cids = list_containers(wallet, shell, endpoint) + for cid in list_cids: + cont_info = get_container(wallet, cid, shell, endpoint, True) + if cont_info.get("attributes", {}).get("Name", None) == name: + return cid + return None diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 7f8391d..8be7982 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -5,26 +5,22 @@ import re import uuid from typing import Any, Optional -from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.cli.neogo import NeoGo +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE -from frostfs_testlib.resources.common import ASSETS_DIR +from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.testing import wait_for_success +from frostfs_testlib.storage.cluster import Cluster from frostfs_testlib.utils import json_utils -from frostfs_testlib.utils.cli_utils import parse_netmap_output -from frostfs_testlib.utils.file_utils import TestFile logger = logging.getLogger("NeoLogger") +reporter = get_reporter() -@reporter.step("Get object from random node") +@reporter.step_deco("Get object from random node") def get_object_from_random_node( - wallet: WalletInfo, + wallet: str, cid: str, oid: str, shell: Shell, @@ -32,6 +28,7 @@ def get_object_from_random_node( bearer: Optional[str] = None, write_object: Optional[str] = None, xhdr: Optional[dict] = None, + wallet_config: Optional[str] = None, no_progress: bool = True, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, @@ -47,6 +44,7 @@ def get_object_from_random_node( cluster: cluster object bearer (optional, str): path to Bearer Token file, appends to `--bearer` key write_object (optional, str): path to downloaded file, appends to `--file` key + wallet_config(optional, str): path to the wallet config no_progress(optional, bool): do not show progress bar xhdr (optional, dict): 
Request X-Headers in form of Key=Value session (optional, dict): path to a JSON-encoded container session token @@ -64,15 +62,16 @@ def get_object_from_random_node( bearer, write_object, xhdr, + wallet_config, no_progress, session, timeout, ) -@reporter.step("Get object from {endpoint}") +@reporter.step_deco("Get object from {endpoint}") def get_object( - wallet: WalletInfo, + wallet: str, cid: str, oid: str, shell: Shell, @@ -80,21 +79,23 @@ def get_object( bearer: Optional[str] = None, write_object: Optional[str] = None, xhdr: Optional[dict] = None, + wallet_config: Optional[str] = None, no_progress: bool = True, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> TestFile: +) -> str: """ GET from FrostFS. Args: - wallet (WalletInfo): wallet on whose behalf GET is done + wallet (str): wallet on whose behalf GET is done cid (str): ID of Container where we get the Object from oid (str): Object ID shell: executor for cli command bearer: path to Bearer Token file, appends to `--bearer` key write_object: path to downloaded file, appends to `--file` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + wallet_config(optional, str): path to the wallet config no_progress(optional, bool): do not show progress bar xhdr (optional, dict): Request X-Headers in form of Key=Value session (optional, dict): path to a JSON-encoded container session token @@ -105,14 +106,15 @@ def get_object( if not write_object: write_object = str(uuid.uuid4()) - test_file = TestFile(os.path.join(ASSETS_DIR, write_object)) + file_path = os.path.join(ASSETS_DIR, write_object) - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) cli.object.get( rpc_endpoint=endpoint, + wallet=wallet, cid=cid, oid=oid, - file=test_file, + file=file_path, bearer=bearer, no_progress=no_progress, xhdr=xhdr, @@ -120,18 +122,19 @@ def get_object( timeout=timeout, ) - return test_file + return file_path -@reporter.step("Get Range Hash from {endpoint}") +@reporter.step_deco("Get Range Hash from {endpoint}") def get_range_hash( - wallet: WalletInfo, + wallet: str, cid: str, oid: str, range_cut: str, shell: Shell, endpoint: str, bearer: Optional[str] = None, + wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, @@ -148,15 +151,17 @@ def get_range_hash( range_cut: Range to take hash from in the form offset1:length1,..., value to pass to the `--range` parameter endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + wallet_config: path to the wallet config xhdr: Request X-Headers in form of Key=Values session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session. timeout: Timeout for the operation. 
Returns: None """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) result = cli.object.hash( rpc_endpoint=endpoint, + wallet=wallet, cid=cid, oid=oid, range=range_cut, @@ -170,17 +175,17 @@ def get_range_hash( return result.stdout.split(":")[1].strip() -@reporter.step("Put object to random node") +@reporter.step_deco("Put object to random node") def put_object_to_random_node( - wallet: WalletInfo, + wallet: str, path: str, cid: str, shell: Shell, cluster: Cluster, bearer: Optional[str] = None, - copies_number: Optional[int] = None, attributes: Optional[dict] = None, xhdr: Optional[dict] = None, + wallet_config: Optional[str] = None, expire_at: Optional[int] = None, no_progress: bool = True, session: Optional[str] = None, @@ -196,9 +201,9 @@ def put_object_to_random_node( shell: executor for cli command cluster: cluster under test bearer: path to Bearer Token file, appends to `--bearer` key - copies_number: Number of copies of the object to store within the RPC call attributes: User attributes in form of Key1=Value1,Key2=Value2 cluster: cluster under test + wallet_config: path to the wallet config no_progress: do not show progress bar expire_at: Last epoch in the life of the object xhdr: Request X-Headers in form of Key=Value @@ -216,9 +221,9 @@ def put_object_to_random_node( shell, endpoint, bearer, - copies_number, attributes, xhdr, + wallet_config, expire_at, no_progress, session, @@ -226,17 +231,17 @@ def put_object_to_random_node( ) -@reporter.step("Put object at {endpoint} in container {cid}") +@reporter.step_deco("Put object at {endpoint} in container {cid}") def put_object( - wallet: WalletInfo, + wallet: str, path: str, cid: str, shell: Shell, endpoint: str, bearer: Optional[str] = None, - copies_number: Optional[int] = None, attributes: Optional[dict] = None, xhdr: Optional[dict] = None, + wallet_config: Optional[str] = None, expire_at: Optional[int] = None, no_progress: bool = True, session: Optional[str] = None, @@ -251,9 +256,9 @@ def put_object( cid: ID of Container where we get the Object from shell: executor for cli command bearer: path to Bearer Token file, appends to `--bearer` key - copies_number: Number of copies of the object to store within the RPC call attributes: User attributes in form of Key1=Value1,Key2=Value2 endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + wallet_config: path to the wallet config no_progress: do not show progress bar expire_at: Last epoch in the life of the object xhdr: Request X-Headers in form of Key=Value @@ -263,14 +268,14 @@ def put_object( (str): ID of uploaded Object """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) result = cli.object.put( rpc_endpoint=endpoint, + wallet=wallet, file=path, cid=cid, attributes=attributes, bearer=bearer, - copies_number=copies_number, expire_at=expire_at, no_progress=no_progress, xhdr=xhdr, @@ -284,14 +289,15 @@ def put_object( return oid.strip() -@reporter.step("Delete object {cid}/{oid} from {endpoint}") +@reporter.step_deco("Delete object {cid}/{oid} from {endpoint}") def delete_object( - wallet: WalletInfo, + wallet: str, cid: str, oid: str, shell: Shell, endpoint: str, bearer: str = "", + wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, @@ -306,6 +312,7 @@ def delete_object( 
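The object helpers follow the same pattern: a wallet path string plus an optional `wallet_config` that falls back to `DEFAULT_WALLET_CONFIG`. A rough end-to-end sketch with placeholder wallet path, endpoint and container ID, assuming the `generate_file` helper imported elsewhere in this patch:

    from frostfs_testlib.shell import LocalShell
    from frostfs_testlib.steps.cli.object import delete_object, get_object, put_object
    from frostfs_testlib.utils.file_utils import generate_file, get_file_hash

    shell = LocalShell()
    wallet_path = "/path/to/wallet.json"   # placeholder
    endpoint = "storage01.example:8080"    # placeholder RPC endpoint
    cid = "<container id>"                 # placeholder

    # PUT a small test file and keep the returned object ID.
    source = generate_file(1024)
    oid = put_object(wallet_path, source, cid, shell, endpoint)

    # GET it back; the helper returns the path of the downloaded file.
    downloaded = get_object(wallet_path, cid, oid, shell, endpoint)
    assert get_file_hash(source) == get_file_hash(downloaded)

    # DELETE returns the tombstone ID.
    tombstone = delete_object(wallet_path, cid, oid, shell, endpoint)
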
shell: executor for cli command bearer: path to Bearer Token file, appends to `--bearer` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + wallet_config: path to the wallet config xhdr: Request X-Headers in form of Key=Value session: path to a JSON-encoded container session token timeout: Timeout for the operation. @@ -313,9 +320,10 @@ def delete_object( (str): Tombstone ID """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) result = cli.object.delete( rpc_endpoint=endpoint, + wallet=wallet, cid=cid, oid=oid, bearer=bearer, @@ -329,14 +337,15 @@ def delete_object( return tombstone.strip() -@reporter.step("Get Range") +@reporter.step_deco("Get Range") def get_range( - wallet: WalletInfo, + wallet: str, cid: str, oid: str, range_cut: str, shell: Shell, endpoint: str, + wallet_config: Optional[str] = None, bearer: str = "", xhdr: Optional[dict] = None, session: Optional[str] = None, @@ -353,35 +362,37 @@ def get_range( shell: executor for cli command endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key bearer: path to Bearer Token file, appends to `--bearer` key + wallet_config: path to the wallet config xhdr: Request X-Headers in form of Key=Value session: path to a JSON-encoded container session token timeout: Timeout for the operation. Returns: (str, bytes) - path to the file with range content and content of this file as bytes """ - test_file = TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4()))) + range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4())) - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) cli.object.range( rpc_endpoint=endpoint, + wallet=wallet, cid=cid, oid=oid, range=range_cut, - file=test_file, + file=range_file_path, bearer=bearer, xhdr=xhdr, session=session, timeout=timeout, ) - with open(test_file, "rb") as file: + with open(range_file_path, "rb") as file: content = file.read() - return test_file, content + return range_file_path, content -@reporter.step("Lock Object") +@reporter.step_deco("Lock Object") def lock_object( - wallet: WalletInfo, + wallet: str, cid: str, oid: str, shell: Shell, @@ -391,6 +402,7 @@ def lock_object( address: Optional[str] = None, bearer: Optional[str] = None, session: Optional[str] = None, + wallet_config: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, @@ -417,12 +429,13 @@ def lock_object( Lock object ID """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) result = cli.object.lock( rpc_endpoint=endpoint, lifetime=lifetime, expire_at=expire_at, address=address, + wallet=wallet, cid=cid, oid=oid, bearer=bearer, @@ -438,15 +451,16 @@ def lock_object( return oid.strip() -@reporter.step("Search object") +@reporter.step_deco("Search object") def search_object( - wallet: WalletInfo, + wallet: str, cid: str, shell: Shell, endpoint: str, bearer: str = "", filters: Optional[dict] = None, expected_objects_list: Optional[list] = None, + wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, session: Optional[str] = None, phy: bool = False, @@ -464,6 +478,7 @@ def search_object( endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key filters: key=value pairs to filter Objects 
expected_objects_list: a list of ObjectIDs to compare found Objects with + wallet_config: path to the wallet config xhdr: Request X-Headers in form of Key=Value session: path to a JSON-encoded container session token phy: Search physically stored objects. @@ -474,13 +489,16 @@ def search_object( list of found ObjectIDs """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) result = cli.object.search( rpc_endpoint=endpoint, + wallet=wallet, cid=cid, bearer=bearer, xhdr=xhdr, - filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None, + filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] + if filters + else None, session=session, phy=phy, root=root, @@ -491,18 +509,25 @@ def search_object( if expected_objects_list: if sorted(found_objects) == sorted(expected_objects_list): - logger.info(f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'") + logger.info( + f"Found objects list '{found_objects}' " + f"is equal for expected list '{expected_objects_list}'" + ) else: - logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'") + logger.warning( + f"Found object list {found_objects} " + f"is not equal to expected list '{expected_objects_list}'" + ) return found_objects -@reporter.step("Get netmap netinfo") +@reporter.step_deco("Get netmap netinfo") def get_netmap_netinfo( - wallet: WalletInfo, + wallet: str, shell: Shell, endpoint: str, + wallet_config: Optional[str] = None, address: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -512,7 +537,7 @@ def get_netmap_netinfo( Get netmap netinfo output from node Args: - wallet (WalletInfo): wallet on whose behalf request is done + wallet (str): wallet on whose behalf request is done shell: executor for cli command endpoint (optional, str): FrostFS endpoint to send request to, appends to `--rpc-endpoint` key address: Address of wallet account @@ -525,8 +550,9 @@ def get_netmap_netinfo( (dict): dict of parsed command output """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) output = cli.netmap.netinfo( + wallet=wallet, rpc_endpoint=endpoint, address=address, ttl=ttl, @@ -548,9 +574,9 @@ def get_netmap_netinfo( return settings -@reporter.step("Head object") +@reporter.step_deco("Head object") def head_object( - wallet: WalletInfo, + wallet: str, cid: str, oid: str, shell: Shell, @@ -560,6 +586,7 @@ def head_object( json_output: bool = True, is_raw: bool = False, is_direct: bool = False, + wallet_config: Optional[str] = None, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ): @@ -567,7 +594,7 @@ def head_object( HEAD an Object. 
Args: - wallet (WalletInfo): wallet on whose behalf HEAD is done + wallet (str): wallet on whose behalf HEAD is done cid (str): ID of Container where we get the Object from oid (str): ObjectID to HEAD shell: executor for cli command @@ -579,6 +606,7 @@ def head_object( turns into `--raw` key is_direct(optional, bool): send request directly to the node or not; this flag turns into `--ttl 1` key + wallet_config(optional, str): path to the wallet config xhdr (optional, dict): Request X-Headers in form of Key=Value session (optional, dict): path to a JSON-encoded container session token timeout: Timeout for the operation. @@ -589,9 +617,10 @@ def head_object( (str): HEAD response as a plain text """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) result = cli.object.head( rpc_endpoint=endpoint, + wallet=wallet, cid=cid, oid=oid, bearer=bearer, @@ -617,32 +646,32 @@ def head_object( fst_line_idx = result.stdout.find("\n") decoded = json.loads(result.stdout[fst_line_idx:]) - # if response - if "chunks" in decoded.keys(): - logger.info("decoding ec chunks") - return decoded["chunks"] - # If response is Complex Object header, it has `splitId` key if "splitId" in decoded.keys(): + logger.info("decoding split header") return json_utils.decode_split_header(decoded) # If response is Last or Linking Object header, # it has `header` dictionary and non-null `split` dictionary if "split" in decoded["header"].keys(): if decoded["header"]["split"]: + logger.info("decoding linking object") return json_utils.decode_linking_object(decoded) if decoded["header"]["objectType"] == "STORAGE_GROUP": + logger.info("decoding storage group") return json_utils.decode_storage_group(decoded) if decoded["header"]["objectType"] == "TOMBSTONE": + logger.info("decoding tombstone") return json_utils.decode_tombstone(decoded) + logger.info("decoding simple header") return json_utils.decode_simple_header(decoded) -@reporter.step("Run neo-go dump-keys") -def neo_go_dump_keys(shell: Shell, wallet: WalletInfo) -> dict: +@reporter.step_deco("Run neo-go dump-keys") +def neo_go_dump_keys(shell: Shell, wallet: str) -> dict: """ Run neo-go dump keys command @@ -666,7 +695,7 @@ def neo_go_dump_keys(shell: Shell, wallet: WalletInfo) -> dict: return {address_id: wallet_key} -@reporter.step("Run neo-go query height") +@reporter.step_deco("Run neo-go query height") def neo_go_query_height(shell: Shell, endpoint: str) -> dict: """ Run neo-go query height command @@ -691,72 +720,8 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict: latest_block = first_line.split(":") # taking second line from command's output contain wallet key second_line = output.split("\n")[1] - if second_line != "": - validated_state = second_line.split(":") - return { - latest_block[0].replace(":", ""): int(latest_block[1]), - validated_state[0].replace(":", ""): int(validated_state[1]), - } - return {latest_block[0].replace(":", ""): int(latest_block[1])} - - -@wait_for_success() -@reporter.step("Search object nodes") -def get_object_nodes( - cluster: Cluster, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> list[ClusterNode]: - shell = alive_node.host.get_shell() - endpoint = alive_node.storage_node.get_rpc_endpoint() - wallet = alive_node.storage_node.get_remote_wallet_path() - 
wallet_config = alive_node.storage_node.get_remote_wallet_config_path() - - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config) - - response = cli.object.nodes( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - bearer=bearer, - ttl=1 if is_direct else None, - json=True, - xhdr=xhdr, - timeout=timeout, - verify_presence_all=verify_presence_all, - ) - - response_json = json.loads(response.stdout) - # Currently, the command will show expected and confirmed nodes. - # And we (currently) count only nodes which are both expected and confirmed - object_nodes_id = { - required_node - for data_object in response_json["data_objects"] - for required_node in data_object["required_nodes"] - if required_node in data_object["confirmed_nodes"] + validated_state = second_line.split(":") + return { + latest_block[0].replace(":", ""): int(latest_block[1]), + validated_state[0].replace(":", ""): int(validated_state[1]), } - - netmap_nodes_list = parse_netmap_output( - cli.netmap.snapshot( - rpc_endpoint=endpoint, - wallet=wallet, - ).stdout - ) - netmap_nodes = [ - netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id - ] - - object_nodes = [ - cluster_node - for netmap_node in netmap_nodes - for cluster_node in cluster.cluster_nodes - if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT) - ] - - return object_nodes diff --git a/src/frostfs_testlib/steps/cli/tree.py b/src/frostfs_testlib/steps/cli/tree.py deleted file mode 100644 index 4b0dfb3..0000000 --- a/src/frostfs_testlib/steps/cli/tree.py +++ /dev/null @@ -1,35 +0,0 @@ -import logging -from typing import Optional - -from frostfs_testlib import reporter -from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.plugins import load_plugin -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC -from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo - -logger = logging.getLogger("NeoLogger") - - - -@reporter.step("Get Tree List") -def get_tree_list( - wallet: WalletInfo, - cid: str, - shell: Shell, - endpoint: str, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> None: - """ - A wrapper for `frostfs-cli tree list` call. - Args: - wallet (WalletInfo): path to a wallet on whose behalf we delete the container - cid (str): ID of the container to delete - shell: executor for cli command - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - timeout: Timeout for the operation. - This function doesn't return anything. 
- """ - - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - cli.tree.list(cid=cid, rpc_endpoint=endpoint, timeout=timeout) diff --git a/src/frostfs_testlib/steps/complex_object_actions.py b/src/frostfs_testlib/steps/complex_object_actions.py index e1a7088..54e5fc2 100644 --- a/src/frostfs_testlib/steps/complex_object_actions.py +++ b/src/frostfs_testlib/steps/complex_object_actions.py @@ -12,14 +12,15 @@ import logging from typing import Optional, Tuple -from frostfs_testlib import reporter +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import head_object from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +reporter = get_reporter() logger = logging.getLogger("NeoLogger") @@ -44,7 +45,7 @@ def get_storage_object_chunks( with reporter.step(f"Get complex object chunks (f{storage_object.oid})"): split_object_id = get_link_object( - storage_object.wallet, + storage_object.wallet_file_path, storage_object.cid, storage_object.oid, shell, @@ -53,7 +54,7 @@ def get_storage_object_chunks( timeout=timeout, ) head = head_object( - storage_object.wallet, + storage_object.wallet_file_path, storage_object.cid, split_object_id, shell, @@ -96,7 +97,7 @@ def get_complex_object_split_ranges( chunks_ids = get_storage_object_chunks(storage_object, shell, cluster) for chunk_id in chunks_ids: head = head_object( - storage_object.wallet, + storage_object.wallet_file_path, storage_object.cid, chunk_id, shell, @@ -112,14 +113,15 @@ def get_complex_object_split_ranges( return ranges -@reporter.step("Get Link Object") +@reporter.step_deco("Get Link Object") def get_link_object( - wallet: WalletInfo, + wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode], bearer: str = "", + wallet_config: str = DEFAULT_WALLET_CONFIG, is_direct: bool = True, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ): @@ -153,6 +155,7 @@ def get_link_object( is_raw=True, is_direct=is_direct, bearer=bearer, + wallet_config=wallet_config, timeout=timeout, ) if resp["link"]: @@ -163,9 +166,9 @@ def get_link_object( return None -@reporter.step("Get Last Object") +@reporter.step_deco("Get Last Object") def get_last_object( - wallet: WalletInfo, + wallet: str, cid: str, oid: str, shell: Shell, diff --git a/src/frostfs_testlib/steps/epoch.py b/src/frostfs_testlib/steps/epoch.py index 6ec5483..0d40f8d 100644 --- a/src/frostfs_testlib/steps/epoch.py +++ b/src/frostfs_testlib/steps/epoch.py @@ -2,9 +2,15 @@ import logging from time import sleep from typing import Optional -from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.cli import ( + CLI_DEFAULT_TIMEOUT, + FROSTFS_ADM_CONFIG_PATH, + FROSTFS_ADM_EXEC, + FROSTFS_CLI_EXEC, + NEOGO_EXECUTABLE, +) from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.steps.payment_neogo import get_contract_hash @@ -13,10 +19,11 @@ from 
frostfs_testlib.storage.dataclasses.frostfs_services import InnerRing, Morp from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils import datetime_utils, wallet_utils +reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step("Get epochs from nodes") +@reporter.step_deco("Get epochs from nodes") def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]: """ Get current epochs on each node. @@ -34,8 +41,10 @@ def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]: return epochs_by_node -@reporter.step("Ensure fresh epoch") -def ensure_fresh_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None) -> int: +@reporter.step_deco("Ensure fresh epoch") +def ensure_fresh_epoch( + shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None +) -> int: # ensure new fresh epoch to avoid epoch switch during test session alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] current_epoch = get_epoch(shell, cluster, alive_node) @@ -45,17 +54,19 @@ def ensure_fresh_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[Stor return epoch -@reporter.step("Wait up to {timeout} seconds for nodes on cluster to align epochs") -def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60): - @wait_for_success(timeout, 5, None, True) - def check_epochs(): - epochs_by_node = get_epochs_from_nodes(shell, cluster) - assert len(set(epochs_by_node.values())) == 1, f"unaligned epochs found: {epochs_by_node}" - - check_epochs() +@reporter.step_deco("Wait for epochs align in whole cluster") +@wait_for_success(60, 5) +def wait_for_epochs_align(shell: Shell, cluster: Cluster) -> None: + epochs = [] + for node in cluster.services(StorageNode): + epochs.append(get_epoch(shell, cluster, node)) + unique_epochs = list(set(epochs)) + assert ( + len(unique_epochs) == 1 + ), f"unaligned epochs found, {epochs}, count of unique epochs {len(unique_epochs)}" -@reporter.step("Get Epoch") +@reporter.step_deco("Get Epoch") def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] endpoint = alive_node.get_rpc_endpoint() @@ -68,8 +79,8 @@ def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] return int(epoch.stdout) -@reporter.step("Tick Epoch") -def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None, delta: Optional[int] = None): +@reporter.step_deco("Tick Epoch") +def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): """ Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv) Args: @@ -81,24 +92,19 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] remote_shell = alive_node.host.get_shell() - if "force_transactions" not in alive_node.host.config.attributes: + if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH: # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests) frostfs_adm = FrostfsAdm( shell=remote_shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH, ) - frostfs_adm.morph.force_new_epoch(delta=delta) + frostfs_adm.morph.force_new_epoch() return # Otherwise we tick epoch using transaction cur_epoch = get_epoch(shell, cluster) - if delta: - next_epoch = cur_epoch + delta - 
else: - next_epoch = cur_epoch + 1 - # Use first node by default ir_node = cluster.services(InnerRing)[0] # In case if no local_wallet_path is provided, we use wallet_path @@ -115,7 +121,7 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] wallet_password=ir_wallet_pass, scripthash=get_contract_hash(morph_chain, "netmap.frostfs", shell=shell), method="newEpoch", - arguments=f"int:{next_epoch}", + arguments=f"int:{cur_epoch + 1}", multisig_hash=f"{ir_address}:Global", address=ir_address, rpc_endpoint=morph_endpoint, diff --git a/src/frostfs_testlib/storage/grpc_operations/__init__.py b/src/frostfs_testlib/steps/http/__init__.py similarity index 100% rename from src/frostfs_testlib/storage/grpc_operations/__init__.py rename to src/frostfs_testlib/steps/http/__init__.py diff --git a/src/frostfs_testlib/steps/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py similarity index 61% rename from src/frostfs_testlib/steps/http_gate.py rename to src/frostfs_testlib/steps/http/http_gate.py index 51b0301..c9769fb 100644 --- a/src/frostfs_testlib/steps/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -10,105 +10,93 @@ from urllib.parse import quote_plus import requests -from frostfs_testlib import reporter -from frostfs_testlib.cli import GenericCli -from frostfs_testlib.clients.s3.aws_cli_client import command_options -from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE +from frostfs_testlib.s3.aws_cli_client import LONG_TIMEOUT from frostfs_testlib.shell import Shell -from frostfs_testlib.shell.local_shell import LocalShell from frostfs_testlib.steps.cli.object import get_object from frostfs_testlib.steps.storage_policy import get_nodes_without_object -from frostfs_testlib.storage.cluster import ClusterNode, StorageNode -from frostfs_testlib.testing.test_control import retry -from frostfs_testlib.utils.file_utils import TestFile, get_file_hash +from frostfs_testlib.storage.cluster import StorageNode +from frostfs_testlib.utils.cli_utils import _cmd_run +from frostfs_testlib.utils.file_utils import get_file_hash + +reporter = get_reporter() logger = logging.getLogger("NeoLogger") -local_shell = LocalShell() +ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/") -@reporter.step("Get via HTTP Gate") -def get_via_http_gate( - cid: str, - oid: str, - node: ClusterNode, - request_path: Optional[str] = None, - timeout: Optional[int] = 300, -): +@reporter.step_deco("Get via HTTP Gate") +def get_via_http_gate(cid: str, oid: str, endpoint: str, request_path: Optional[str] = None): """ This function gets given object from HTTP gate cid: container id to get object from - oid: object id / object key - node: node to make request + oid: object ID + endpoint: http gate endpoint request_path: (optional) http request, if ommited - use default [{endpoint}/get/{cid}/{oid}] """ - request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" - if request_path: - request = f"{node.http_gate.get_endpoint()}{request_path}" + # if `request_path` parameter omitted, use default + if request_path is None: + request = f"{endpoint}/get/{cid}/{oid}" + else: + request = f"{endpoint}{request_path}" - response = requests.get(request, stream=True, timeout=timeout, verify=False) - - if not response.ok: - raise Exception( - f"""Failed to get object via HTTP gate: - request: {response.request.path_url}, - response: {response.text}, - headers: 
{response.headers}, - status code: {response.status_code} {response.reason}""" - ) - - logger.info(f"Request: {request}") - _attach_allure_step(request, response.status_code) - - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")) - with open(test_file, "wb") as file: - for chunk in response.iter_content(chunk_size=8192): - file.write(chunk) - - return test_file - - -@reporter.step("Get via Zip HTTP Gate") -def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Optional[int] = 300): - """ - This function gets given object from HTTP gate - cid: container id to get object from - prefix: common prefix - node: node to make request - """ - request = f"{node.http_gate.get_endpoint()}/zip/{cid}/{prefix}" - resp = requests.get(request, stream=True, timeout=timeout, verify=False) + resp = requests.get(request, stream=True) if not resp.ok: raise Exception( f"""Failed to get object via HTTP gate: request: {resp.request.path_url}, response: {resp.text}, - headers: {resp.headers}, status code: {resp.status_code} {resp.reason}""" ) logger.info(f"Request: {request}") _attach_allure_step(request, resp.status_code) - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip")) - with open(test_file, "wb") as file: + file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}") + with open(file_path, "wb") as file: + shutil.copyfileobj(resp.raw, file) + return file_path + + +@reporter.step_deco("Get via Zip HTTP Gate") +def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str): + """ + This function gets given object from HTTP gate + cid: container id to get object from + prefix: common prefix + endpoint: http gate endpoint + """ + request = f"{endpoint}/zip/{cid}/{prefix}" + resp = requests.get(request, stream=True) + + if not resp.ok: + raise Exception( + f"""Failed to get object via HTTP gate: + request: {resp.request.path_url}, + response: {resp.text}, + status code: {resp.status_code} {resp.reason}""" + ) + + logger.info(f"Request: {request}") + _attach_allure_step(request, resp.status_code) + + file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip") + with open(file_path, "wb") as file: shutil.copyfileobj(resp.raw, file) - with zipfile.ZipFile(test_file, "r") as zip_ref: + with zipfile.ZipFile(file_path, "r") as zip_ref: zip_ref.extractall(ASSETS_DIR) return os.path.join(os.getcwd(), ASSETS_DIR, prefix) -@reporter.step("Get via HTTP Gate by attribute") +@reporter.step_deco("Get via HTTP Gate by attribute") def get_via_http_gate_by_attribute( - cid: str, - attribute: dict, - node: ClusterNode, - request_path: Optional[str] = None, - timeout: Optional[int] = 300, + cid: str, attribute: dict, endpoint: str, request_path: Optional[str] = None ): """ This function gets given object from HTTP gate @@ -117,36 +105,35 @@ def get_via_http_gate_by_attribute( endpoint: http gate endpoint request_path: (optional) http request path, if omitted - use default [{endpoint}/get_by_attribute/{Key}/{Value}] """ - attr_name = list(attribute.keys())[0] attr_value = quote_plus(str(attribute.get(attr_name))) + # if `request_path` parameter omitted, use default + if request_path is None: + request = f"{endpoint}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" + else: + request = f"{endpoint}{request_path}" - request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" - if request_path: - request = f"{node.http_gate.get_endpoint()}{request_path}" - - resp =
requests.get(request, stream=True, timeout=timeout, verify=False) + resp = requests.get(request, stream=True) if not resp.ok: raise Exception( f"""Failed to get object via HTTP gate: request: {resp.request.path_url}, response: {resp.text}, - headers: {resp.headers}, status code: {resp.status_code} {resp.reason}""" ) logger.info(f"Request: {request}") _attach_allure_step(request, resp.status_code) - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}")) - with open(test_file, "wb") as file: + file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}") + with open(file_path, "wb") as file: shutil.copyfileobj(resp.raw, file) - return test_file + return file_path -@reporter.step("Upload via HTTP Gate") -def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300) -> str: +@reporter.step_deco("Upload via HTTP Gate") +def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None) -> str: """ This function uploads the given object through the HTTP gate cid: CID of the container to upload the object to @@ -157,7 +144,7 @@ def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[d request = f"{endpoint}/upload/{cid}" files = {"upload_file": open(path, "rb")} body = {"filename": path} - resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout, verify=False) + resp = requests.post(request, files=files, data=body, headers=headers) if not resp.ok: raise Exception( @@ -175,7 +162,7 @@ def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[d return resp.json().get("object_id") -@reporter.step("Check is the passed object large") +@reporter.step_deco("Check is the passed object large") def is_object_large(filepath: str) -> bool: """ This function checks the passed file size and returns True if file_size > SIMPLE_OBJECT_SIZE @@ -189,7 +176,7 @@ def is_object_large(filepath: str) -> bool: return False -@reporter.step("Upload via HTTP Gate using Curl") +@reporter.step_deco("Upload via HTTP Gate using Curl") def upload_via_http_gate_curl( cid: str, filepath: str, @@ -214,16 +201,16 @@ def upload_via_http_gate_curl( large_object = is_object_large(filepath) if large_object: # pre-clean - local_shell.exec("rm pipe -f") + _cmd_run("rm pipe -f") files = f"file=@pipe;filename={os.path.basename(filepath)}" - cmd = f"mkfifo pipe;cat {filepath} > pipe & curl -k --no-buffer -F '{files}' {attributes} {request}" - output = local_shell.exec(cmd, command_options) + cmd = f"mkfifo pipe;cat {filepath} > pipe & curl --no-buffer -F '{files}' {attributes} {request}" + output = _cmd_run(cmd, LONG_TIMEOUT) # clean up pipe - local_shell.exec("rm pipe") + _cmd_run("rm pipe") else: files = f"file=@{filepath};filename={os.path.basename(filepath)}" - cmd = f"curl -k -F '{files}' {attributes} {request}" - output = local_shell.exec(cmd) + cmd = f"curl -F '{files}' {attributes} {request}" + output = _cmd_run(cmd) if error_pattern: match = error_pattern.casefold() in str(output).casefold() @@ -236,22 +223,21 @@ def upload_via_http_gate_curl( return oid_re.group(1) -@retry(max_attempts=3, sleep_interval=1) -@reporter.step("Get via HTTP Gate using Curl") -def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> TestFile: +@reporter.step_deco("Get via HTTP Gate using Curl") +def get_via_http_curl(cid: str, oid: str, endpoint: str) -> str: """ This function gets given object from HTTP gate using curl utility.
cid: CID to get object from oid: object OID - node: node for request + endpoint: http gate endpoint """ - request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")) + request = f"{endpoint}/get/{cid}/{oid}" + file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}") - curl = GenericCli("curl", node.host) - curl(f"-k ", f"{request} > {test_file}", shell=local_shell) + cmd = f"curl {request} > {file_path}" + _cmd_run(cmd) - return test_file + return file_path def _attach_allure_step(request: str, status_code: int, req_type="GET"): @@ -260,31 +246,26 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"): reporter.attach(command_attachment, f"{req_type} Request") -@reporter.step("Try to get object and expect error") +@reporter.step_deco("Try to get object and expect error") def try_to_get_object_and_expect_error( - cid: str, - oid: str, - node: ClusterNode, - error_pattern: str, + cid: str, oid: str, error_pattern: str, endpoint: str ) -> None: try: - get_via_http_gate(cid=cid, oid=oid, node=node) + get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint) raise AssertionError(f"Expected error on getting object with cid: {cid}") except Exception as err: match = error_pattern.casefold() in str(err).casefold() assert match, f"Expected {err} to match {error_pattern}" -@reporter.step("Verify object can be get using HTTP header attribute") +@reporter.step_deco("Verify object can be get using HTTP header attribute") def get_object_by_attr_and_verify_hashes( - oid: str, - file_name: str, - cid: str, - attrs: dict, - node: ClusterNode, + oid: str, file_name: str, cid: str, attrs: dict, endpoint: str ) -> None: - got_file_path_http = get_via_http_gate(cid=cid, oid=oid, node=node) - got_file_path_http_attr = get_via_http_gate_by_attribute(cid=cid, attribute=attrs, node=node) + got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint) + got_file_path_http_attr = get_via_http_gate_by_attribute( + cid=cid, attribute=attrs, endpoint=endpoint + ) assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr) @@ -295,7 +276,7 @@ def verify_object_hash( cid: str, shell: Shell, nodes: list[StorageNode], - request_node: ClusterNode, + endpoint: str, object_getter=None, ) -> None: @@ -321,7 +302,7 @@ def verify_object_hash( shell=shell, endpoint=random_node.get_rpc_endpoint(), ) - got_file_path_http = object_getter(cid=cid, oid=oid, node=request_node) + got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint) assert_hashes_are_equal(file_name, got_file_path, got_file_path_http) @@ -330,14 +311,18 @@ def assert_hashes_are_equal(orig_file_name: str, got_file_1: str, got_file_2: st msg = "Expected hashes are equal for files {f1} and {f2}" got_file_hash_http = get_file_hash(got_file_1) assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1) - assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(f1=orig_file_name, f2=got_file_1) + assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format( + f1=orig_file_name, f2=got_file_1 + ) def attr_into_header(attrs: dict) -> dict: return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()} -@reporter.step("Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'") +@reporter.step_deco( + "Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: 
Value'" +) def attr_into_str_header_curl(attrs: dict) -> list: headers = [] for k, v in attrs.items(): @@ -346,20 +331,24 @@ def attr_into_str_header_curl(attrs: dict) -> list: return headers -@reporter.step("Try to get object via http (pass http_request and optional attributes) and expect error") +@reporter.step_deco( + "Try to get object via http (pass http_request and optional attributes) and expect error" +) def try_to_get_object_via_passed_request_and_expect_error( cid: str, oid: str, - node: ClusterNode, error_pattern: str, + endpoint: str, http_request_path: str, attrs: Optional[dict] = None, ) -> None: try: if attrs is None: - get_via_http_gate(cid, oid, node, http_request_path) + get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, request_path=http_request_path) else: - get_via_http_gate_by_attribute(cid, attrs, node, http_request_path) + get_via_http_gate_by_attribute( + cid=cid, attribute=attrs, endpoint=endpoint, request_path=http_request_path + ) raise AssertionError(f"Expected error on getting object with cid: {cid}") except Exception as err: match = error_pattern.casefold() in str(err).casefold() diff --git a/src/frostfs_testlib/steps/metrics.py b/src/frostfs_testlib/steps/metrics.py deleted file mode 100644 index a9e545a..0000000 --- a/src/frostfs_testlib/steps/metrics.py +++ /dev/null @@ -1,45 +0,0 @@ -import re - -from frostfs_testlib import reporter -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.testing.test_control import wait_for_success - - -@reporter.step("Check metrics result") -@wait_for_success(interval=10) -def check_metrics_counter( - cluster_nodes: list[ClusterNode], - operator: str = "==", - counter_exp: int = 0, - parse_from_command: bool = False, - **metrics_greps: str, -): - counter_act = 0 - for cluster_node in cluster_nodes: - counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps) - assert eval( - f"{counter_act} {operator} {counter_exp}" - ), f"Expected: {counter_exp} {operator} Actual: {counter_act} in nodes: {cluster_nodes}" - - -@reporter.step("Get metrics value from node: {node}") -def get_metrics_value(node: ClusterNode, parse_from_command: bool = False, **metrics_greps: str): - try: - command_result = node.metrics.storage.get_metrics_search_by_greps(**metrics_greps) - if parse_from_command: - metrics_counter = calc_metrics_count_from_stdout(command_result.stdout, **metrics_greps) - else: - metrics_counter = calc_metrics_count_from_stdout(command_result.stdout) - except RuntimeError as e: - metrics_counter = 0 - - return metrics_counter - - -@reporter.step("Parse metrics count and calc sum of result") -def calc_metrics_count_from_stdout(metric_result_stdout: str, command: str = None): - if command: - result = re.findall(rf"{command}\s*([\d.e+-]+)", metric_result_stdout) - else: - result = re.findall(r"}\s*([\d.e+-]+)", metric_result_stdout) - return sum(map(lambda x: int(float(x)), result)) diff --git a/src/frostfs_testlib/steps/network.py b/src/frostfs_testlib/steps/network.py deleted file mode 100644 index 6bde2f1..0000000 --- a/src/frostfs_testlib/steps/network.py +++ /dev/null @@ -1,21 +0,0 @@ -from frostfs_testlib.shell import CommandOptions -from frostfs_testlib.storage.cluster import ClusterNode - - -class IpHelper: - @staticmethod - def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[tuple]) -> None: - shell = node.host.get_shell() - for ip, table in block_ip: - if not table: - shell.exec(f"ip r a blackhole {ip}") - continue - shell.exec(f"ip r a blackhole {ip} 
table {table}") - - @staticmethod - def restore_input_traffic_to_node(node: ClusterNode) -> None: - shell = node.host.get_shell() - unlock_ip = shell.exec("ip r l table all | grep blackhole", CommandOptions(check=False)).stdout - - for active_blackhole in unlock_ip.strip().split("\n"): - shell.exec(f"ip r d {active_blackhole}") diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py index 42b1fc5..aec9b8a 100644 --- a/src/frostfs_testlib/steps/node_management.py +++ b/src/frostfs_testlib/steps/node_management.py @@ -6,16 +6,21 @@ from dataclasses import dataclass from time import sleep from typing import Optional -from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli -from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.cli import ( + FROSTFS_ADM_CONFIG_PATH, + FROSTFS_ADM_EXEC, + FROSTFS_CLI_EXEC, +) from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell -from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align +from frostfs_testlib.steps.epoch import tick_epoch from frostfs_testlib.storage.cluster import Cluster, StorageNode -from frostfs_testlib.testing.test_control import wait_for_success +from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate from frostfs_testlib.utils import datetime_utils +reporter = get_reporter() logger = logging.getLogger("NeoLogger") @@ -35,7 +40,45 @@ class HealthStatus: return HealthStatus(network, health) -@reporter.step("Get Locode from random storage node") +@reporter.step_deco("Stop random storage nodes") +def stop_random_storage_nodes(number: int, nodes: list[StorageNode]) -> list[StorageNode]: + """ + Shuts down the given number of randomly selected storage nodes. + Args: + number: the number of storage nodes to stop + nodes: the list of storage nodes to stop + Returns: + the list of nodes that were stopped + """ + nodes_to_stop = random.sample(nodes, number) + for node in nodes_to_stop: + node.stop_service() + return nodes_to_stop + + +@reporter.step_deco("Start storage node") +def start_storage_nodes(nodes: list[StorageNode]) -> None: + """ + The function starts specified storage nodes. + Args: + nodes: the list of nodes to start + """ + for node in nodes: + node.start_service() + + +@reporter.step_deco("Stop storage node") +def stop_storage_nodes(nodes: list[StorageNode]) -> None: + """ + The function starts specified storage nodes. + Args: + nodes: the list of nodes to start + """ + for node in nodes: + node.stop_service() + + +@reporter.step_deco("Get Locode from random storage node") def get_locode_from_random_node(cluster: Cluster) -> str: node = random.choice(cluster.services(StorageNode)) locode = node.get_un_locode() @@ -43,7 +86,7 @@ def get_locode_from_random_node(cluster: Cluster) -> str: return locode -@reporter.step("Healthcheck for storage node {node}") +@reporter.step_deco("Healthcheck for storage node {node}") def storage_node_healthcheck(node: StorageNode) -> HealthStatus: """ The function returns storage node's health status. @@ -52,27 +95,12 @@ def storage_node_healthcheck(node: StorageNode) -> HealthStatus: Returns: health status as HealthStatus object. 
""" - - host = node.host - service_config = host.get_service_config(node.name) - wallet_path = service_config.attributes["wallet_path"] - wallet_password = service_config.attributes["wallet_password"] - control_endpoint = service_config.attributes["control_endpoint"] - - shell = host.get_shell() - wallet_config_path = f"/tmp/{node.name}-config.yaml" - wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' - shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") - - cli_config = host.get_cli_config("frostfs-cli") - - cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) - result = cli.control.healthcheck(control_endpoint) - - return HealthStatus.from_stdout(result.stdout) + command = "control healthcheck" + output = _run_control_command_with_retries(node, command) + return HealthStatus.from_stdout(output) -@reporter.step("Set status for {node}") +@reporter.step_deco("Set status for {node}") def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None: """ The function sets particular status for given node. @@ -81,24 +109,11 @@ def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> status: online or offline. retries (optional, int): number of retry attempts if it didn't work from the first time """ - host = node.host - service_config = host.get_service_config(node.name) - wallet_path = service_config.attributes["wallet_path"] - wallet_password = service_config.attributes["wallet_password"] - control_endpoint = service_config.attributes["control_endpoint"] - - shell = host.get_shell() - wallet_config_path = f"/tmp/{node.name}-config.yaml" - wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' - shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") - - cli_config = host.get_cli_config("frostfs-cli") - - cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) - cli.control.set_status(control_endpoint, status) + command = f"control set-status --status {status}" + _run_control_command_with_retries(node, command, retries) -@reporter.step("Get netmap snapshot") +@reporter.step_deco("Get netmap snapshot") def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: """ The function returns string representation of netmap snapshot. @@ -112,11 +127,14 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: storage_wallet_path = node.get_wallet_path() cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config) - return cli.netmap.snapshot(rpc_endpoint=node.get_rpc_endpoint(), wallet=storage_wallet_path).stdout + return cli.netmap.snapshot( + rpc_endpoint=node.get_rpc_endpoint(), + wallet=storage_wallet_path, + ).stdout -@reporter.step("Get shard list for {node}") -def node_shard_list(node: StorageNode, json: Optional[bool] = None) -> list[str]: +@reporter.step_deco("Get shard list for {node}") +def node_shard_list(node: StorageNode) -> list[str]: """ The function returns list of shards for specified storage node. Args: @@ -124,139 +142,112 @@ def node_shard_list(node: StorageNode, json: Optional[bool] = None) -> list[str] Returns: list of shards. 
""" - host = node.host - service_config = host.get_service_config(node.name) - wallet_path = service_config.attributes["wallet_path"] - wallet_password = service_config.attributes["wallet_password"] - control_endpoint = service_config.attributes["control_endpoint"] - - shell = host.get_shell() - wallet_config_path = f"/tmp/{node.name}-config.yaml" - wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' - shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") - - cli_config = host.get_cli_config("frostfs-cli") - - cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) - result = cli.shards.list(endpoint=control_endpoint, json_mode=json) - - return re.findall(r"Shard (.*):", result.stdout) + command = "control shards list" + output = _run_control_command_with_retries(node, command) + return re.findall(r"Shard (.*):", output) -@reporter.step("Shard set for {node}") -def node_shard_set_mode(node: StorageNode, shard: list[str], mode: str) -> None: +@reporter.step_deco("Shard set for {node}") +def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str: """ The function sets mode for specified shard. Args: node: node on which shard mode should be set. """ - host = node.host - service_config = host.get_service_config(node.name) - wallet_path = service_config.attributes["wallet_path"] - wallet_password = service_config.attributes["wallet_password"] - control_endpoint = service_config.attributes["control_endpoint"] - - shell = host.get_shell() - wallet_config_path = f"/tmp/{node.name}-config.yaml" - wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' - shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") - - cli_config = host.get_cli_config("frostfs-cli") - - cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) - cli.shards.set_mode(endpoint=control_endpoint, mode=mode, id=shard) + command = f"control shards set-mode --id {shard} --mode {mode}" + return _run_control_command_with_retries(node, command) -@reporter.step("Drop object from {node}") -def drop_object(node: StorageNode, cid: str, oid: str) -> None: +@reporter.step_deco("Drop object from {node}") +def drop_object(node: StorageNode, cid: str, oid: str) -> str: """ The function drops object from specified node. Args: - node: node from which object should be dropped. + node_id str: node from which object should be dropped. 
""" - host = node.host - service_config = host.get_service_config(node.name) - wallet_path = service_config.attributes["wallet_path"] - wallet_password = service_config.attributes["wallet_password"] - control_endpoint = service_config.attributes["control_endpoint"] - - shell = host.get_shell() - wallet_config_path = f"/tmp/{node.name}-config.yaml" - wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' - shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") - - cli_config = host.get_cli_config("frostfs-cli") - - cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) - objects = f"{cid}/{oid}" - cli.control.drop_objects(control_endpoint, objects) + command = f"control drop-objects -o {cid}/{oid}" + return _run_control_command_with_retries(node, command) -@reporter.step("Delete data from host for node {node}") +@reporter.step_deco("Delete data from host for node {node}") def delete_node_data(node: StorageNode) -> None: node.stop_service() node.host.delete_storage_node_data(node.name) time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) -@reporter.step("Exclude node {node_to_exclude} from network map") -def exclude_node_from_network_map(node_to_exclude: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None: +@reporter.step_deco("Exclude node {node_to_exclude} from network map") +def exclude_node_from_network_map( + node_to_exclude: StorageNode, + alive_node: StorageNode, + shell: Shell, + cluster: Cluster, +) -> None: node_netmap_key = node_to_exclude.get_wallet_public_key() storage_node_set_status(node_to_exclude, status="offline") time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) tick_epoch(shell, cluster) - wait_for_epochs_align(shell, cluster) snapshot = get_netmap_snapshot(node=alive_node, shell=shell) - assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be absent in network map" + assert ( + node_netmap_key not in snapshot + ), f"Expected node with key {node_netmap_key} to be absent in network map" -@reporter.step("Include node {node_to_include} into network map") -def include_node_to_network_map(node_to_include: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None: +@reporter.step_deco("Include node {node_to_include} into network map") +def include_node_to_network_map( + node_to_include: StorageNode, + alive_node: StorageNode, + shell: Shell, + cluster: Cluster, +) -> None: storage_node_set_status(node_to_include, status="online") # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch. - # First sleep can be omitted after https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/60 complete. + # First sleep can be omitted after https://github.com/TrueCloudLab/frostfs-node/issues/60 complete. 
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2) tick_epoch(shell, cluster) time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2) - await_node_in_map(node_to_include, shell, alive_node) + check_node_in_map(node_to_include, shell, alive_node) -@reporter.step("Check node {node} in network map") -def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: +@reporter.step_deco("Check node {node} in network map") +def check_node_in_map( + node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None +) -> None: alive_node = alive_node or node node_netmap_key = node.get_wallet_public_key() logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}") snapshot = get_netmap_snapshot(alive_node, shell) - assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map" + assert ( + node_netmap_key in snapshot + ), f"Expected node with key {node_netmap_key} to be in network map" -@wait_for_success(300, 15, title="Await node {node} in network map") -def await_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: - check_node_in_map(node, shell, alive_node) - - -@reporter.step("Check node {node} NOT in network map") -def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: +@reporter.step_deco("Check node {node} NOT in network map") +def check_node_not_in_map( + node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None +) -> None: alive_node = alive_node or node node_netmap_key = node.get_wallet_public_key() logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}") snapshot = get_netmap_snapshot(alive_node, shell) - assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be NOT in network map" + assert ( + node_netmap_key not in snapshot + ), f"Expected node with key {node_netmap_key} to be NOT in network map" -@reporter.step("Wait for node {node} is ready") +@reporter.step_deco("Wait for node {node} is ready") def wait_for_node_to_be_ready(node: StorageNode) -> None: - timeout, attempts = 60, 15 + timeout, attempts = 30, 6 for _ in range(attempts): try: health_check = storage_node_healthcheck(node) @@ -265,11 +256,18 @@ def wait_for_node_to_be_ready(node: StorageNode) -> None: except Exception as err: logger.warning(f"Node {node} is not ready:\n{err}") sleep(timeout) - raise AssertionError(f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds") + raise AssertionError( + f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds" + ) -@reporter.step("Remove nodes from network map through cli-adm morph command") +@reporter.step_deco("Remove nodes from network map through cli-adm morph command") def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: list[StorageNode], alive_node: Optional[StorageNode] = None): +def remove_nodes_from_map_morph( + shell: Shell, + cluster: Cluster, + remove_nodes: list[StorageNode], + alive_node: Optional[StorageNode] = None, +): """ Move node to the Offline state in the candidates list and tick an epoch to update the netmap using frostfs-adm @@ -288,5 +286,66 @@ def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: li if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH: # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests) - frostfsadm = FrostfsAdm(shell=remote_shell,
frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) + frostfsadm = FrostfsAdm( + shell=remote_shell, + frostfs_adm_exec_path=FROSTFS_ADM_EXEC, + config_file=FROSTFS_ADM_CONFIG_PATH, + ) frostfsadm.morph.remove_nodes(node_netmap_keys) + + +def _run_control_command_with_retries(node: StorageNode, command: str, retries: int = 0) -> str: + for attempt in range(1 + retries): # original attempt + specified retries + try: + return _run_control_command(node, command) + except AssertionError as err: + if attempt < retries: + logger.warning(f"Command {command} failed with error {err} and will be retried") + continue + raise AssertionError(f"Command {command} failed with error {err}") from err + + +def _run_control_command(node: StorageNode, command: str) -> None: + host = node.host + + service_config = host.get_service_config(node.name) + wallet_path = service_config.attributes["wallet_path"] + wallet_password = service_config.attributes["wallet_password"] + control_endpoint = service_config.attributes["control_endpoint"] + + shell = host.get_shell() + wallet_config_path = f"/tmp/{node.name}-config.yaml" + wallet_config = f'password: "{wallet_password}"' + shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") + + cli_config = host.get_cli_config("frostfs-cli") + + # TODO: implement cli.control + # cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) + result = shell.exec( + f"{cli_config.exec_path} {command} --endpoint {control_endpoint} " + f"--wallet {wallet_path} --config {wallet_config_path}" + ) + return result.stdout + + +@reporter.step_deco("Start services s3gate ") +def start_s3gates(cluster: Cluster) -> None: + """ + The function starts all s3gate services in the cluster. + Args: + cluster: cluster instance under test + """ + for gate in cluster.services(S3Gate): + gate.start_service() + + +@reporter.step_deco("Stop services s3gate ") +def stop_s3gates(cluster: Cluster) -> None: + """ + The function stops all s3gate services in the cluster.
+ Args: + cluster: cluster instance under test + """ + for gate in cluster.services(S3Gate): + gate.stop_service() diff --git a/src/frostfs_testlib/steps/payment_neogo.py b/src/frostfs_testlib/steps/payment_neogo.py index 8e78cca..6a64a5a 100644 --- a/src/frostfs_testlib/steps/payment_neogo.py +++ b/src/frostfs_testlib/steps/payment_neogo.py @@ -8,18 +8,20 @@ from typing import Optional from neo3.wallet import utils as neo3_utils from neo3.wallet import wallet as neo3_wallet -from frostfs_testlib import reporter from frostfs_testlib.cli import NeoGo +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain +from frostfs_testlib.storage.dataclasses.frostfs_services import MainChain, MorphChain from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils +reporter = get_reporter() logger = logging.getLogger("NeoLogger") EMPTY_PASSWORD = "" TX_PERSIST_TIMEOUT = 15 # seconds +ASSET_POWER_MAINCHAIN = 10**8 ASSET_POWER_SIDECHAIN = 10**12 @@ -40,7 +42,32 @@ def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell) return bytes.decode(base64.b64decode(stack_data[0]["value"])) -def transaction_accepted(morph_chain: MorphChain, tx_id: str): +@reporter.step_deco("Withdraw Mainnet Gas") +def withdraw_mainnet_gas(shell: Shell, main_chain: MainChain, wlt: str, amount: int): + address = wallet_utils.get_last_address_from_wallet(wlt, EMPTY_PASSWORD) + scripthash = neo3_utils.address_to_script_hash(address) + + neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE) + out = neogo.contract.invokefunction( + wallet=wlt, + address=address, + rpc_endpoint=main_chain.get_endpoint(), + scripthash=FROSTFS_CONTRACT, + method="withdraw", + arguments=f"{scripthash} int:{amount}", + multisig_hash=f"{scripthash}:Global", + wallet_password="", + ) + + m = re.match(r"^Sent invocation transaction (\w{64})$", out.stdout) + if m is None: + raise Exception("Can not get Tx.") + tx = m.group(1) + if not transaction_accepted(main_chain, tx): + raise AssertionError(f"TX {tx} hasn't been processed") + + +def transaction_accepted(main_chain: MainChain, tx_id: str): """ This function returns True in case of accepted TX. Args: @@ -52,8 +79,8 @@ def transaction_accepted(morph_chain: MorphChain, tx_id: str): try: for _ in range(0, TX_PERSIST_TIMEOUT): time.sleep(1) - neogo = NeoGo(shell=morph_chain.host.get_shell(), neo_go_exec_path=NEOGO_EXECUTABLE) - resp = neogo.query.tx(tx_hash=tx_id, rpc_endpoint=morph_chain.get_endpoint()) + neogo = NeoGo(shell=main_chain.host.get_shell(), neo_go_exec_path=NEOGO_EXECUTABLE) + resp = neogo.query.tx(tx_hash=tx_id, rpc_endpoint=main_chain.get_endpoint()) if resp is not None: logger.info(f"TX is accepted in block: {resp}") return True, resp @@ -63,7 +90,7 @@ def transaction_accepted(morph_chain: MorphChain, tx_id: str): return False -@reporter.step("Get FrostFS Balance") +@reporter.step_deco("Get FrostFS Balance") def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""): """ This function returns FrostFS balance for given wallet. 
@@ -84,11 +111,11 @@ def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_ raise out -@reporter.step("Transfer Gas") +@reporter.step_deco("Transfer Gas") def transfer_gas( shell: Shell, amount: int, - morph_chain: MorphChain, + main_chain: MainChain, wallet_from_path: Optional[str] = None, wallet_from_password: Optional[str] = None, address_from: Optional[str] = None, @@ -111,16 +138,22 @@ def transfer_gas( address_to: The address of the wallet to transfer assets to. amount: Amount of gas to transfer. """ - wallet_from_path = wallet_from_path or morph_chain.get_wallet_path() + wallet_from_path = wallet_from_path or main_chain.get_wallet_path() wallet_from_password = ( - wallet_from_password if wallet_from_password is not None else morph_chain.get_wallet_password() + wallet_from_password + if wallet_from_password is not None + else main_chain.get_wallet_password() + ) + address_from = address_from or wallet_utils.get_last_address_from_wallet( + wallet_from_path, wallet_from_password + ) + address_to = address_to or wallet_utils.get_last_address_from_wallet( + wallet_to_path, wallet_to_password ) - address_from = address_from or wallet_utils.get_last_address_from_wallet(wallet_from_path, wallet_from_password) - address_to = address_to or wallet_utils.get_last_address_from_wallet(wallet_to_path, wallet_to_password) neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) out = neogo.nep17.transfer( - rpc_endpoint=morph_chain.get_endpoint(), + rpc_endpoint=main_chain.get_endpoint(), wallet=wallet_from_path, wallet_password=wallet_from_password, amount=amount, @@ -132,12 +165,50 @@ def transfer_gas( txid = out.stdout.strip().split("\n")[-1] if len(txid) != 64: raise Exception("Got no TXID after run the command") - if not transaction_accepted(morph_chain, txid): + if not transaction_accepted(main_chain, txid): raise AssertionError(f"TX {txid} hasn't been processed") time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) -@reporter.step("Get Sidechain Balance") +@reporter.step_deco("FrostFS Deposit") +def deposit_gas( + shell: Shell, + main_chain: MainChain, + amount: int, + wallet_from_path: str, + wallet_from_password: str, +): + """ + Transferring GAS from given wallet to FrostFS contract address. 
+ """ + # get FrostFS contract address + deposit_addr = converting_utils.contract_hash_to_address(FROSTFS_CONTRACT) + logger.info(f"FrostFS contract address: {deposit_addr}") + address_from = wallet_utils.get_last_address_from_wallet( + wallet_path=wallet_from_path, wallet_password=wallet_from_password + ) + transfer_gas( + shell=shell, + main_chain=main_chain, + amount=amount, + wallet_from_path=wallet_from_path, + wallet_from_password=wallet_from_password, + address_to=deposit_addr, + address_from=address_from, + ) + + +@reporter.step_deco("Get Mainnet Balance") +def get_mainnet_balance(main_chain: MainChain, address: str): + resp = main_chain.rpc_client.get_nep17_balances(address=address) + logger.info(f"Got getnep17balances response: {resp}") + for balance in resp["balance"]: + if balance["assethash"] == GAS_HASH: + return float(balance["amount"]) / ASSET_POWER_MAINCHAIN + return float(0) + + +@reporter.step_deco("Get Sidechain Balance") def get_sidechain_balance(morph_chain: MorphChain, address: str): resp = morph_chain.rpc_client.get_nep17_balances(address=address) logger.info(f"Got getnep17balances response: {resp}") diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py new file mode 100644 index 0000000..87f929e --- /dev/null +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -0,0 +1,247 @@ +import json +import logging +import os +import re +import uuid +from datetime import datetime, timedelta +from typing import Optional + +from dateutil.parser import parse + +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC +from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT +from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus +from frostfs_testlib.storage.cluster import Cluster +from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate +from frostfs_testlib.utils.cli_utils import _run_with_passwd + +reporter = get_reporter() +logger = logging.getLogger("NeoLogger") + + +@reporter.step_deco("Expected all objects are presented in the bucket") +def check_objects_in_bucket( + s3_client: S3ClientWrapper, + bucket: str, + expected_objects: list, + unexpected_objects: Optional[list] = None, +) -> None: + unexpected_objects = unexpected_objects or [] + bucket_objects = s3_client.list_objects(bucket) + assert len(bucket_objects) == len( + expected_objects + ), f"Expected {len(expected_objects)} objects in the bucket" + for bucket_object in expected_objects: + assert ( + bucket_object in bucket_objects + ), f"Expected object {bucket_object} in objects list {bucket_objects}" + + for bucket_object in unexpected_objects: + assert ( + bucket_object not in bucket_objects + ), f"Expected object {bucket_object} not in objects list {bucket_objects}" + + +@reporter.step_deco("Try to get object and got error") +def try_to_get_objects_and_expect_error( + s3_client: S3ClientWrapper, bucket: str, object_keys: list +) -> None: + for obj in object_keys: + try: + s3_client.get_object(bucket, obj) + raise AssertionError(f"Object {obj} found in bucket {bucket}") + except Exception as err: + assert "The specified key does not exist" in str( + err + ), f"Expected error in exception {err}" + + +@reporter.step_deco("Set versioning status to '{status}' for bucket '{bucket}'") +def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus): + s3_client.get_bucket_versioning_status(bucket) + s3_client.put_bucket_versioning(bucket, 
status=status) + bucket_status = s3_client.get_bucket_versioning_status(bucket) + assert bucket_status == status.value, f"Expected {bucket_status} status. Got {status.value}" + + +def object_key_from_file_path(full_path: str) -> str: + return os.path.basename(full_path) + + +def assert_tags( + actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None +) -> None: + expected_tags = ( + [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else [] + ) + unexpected_tags = ( + [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else [] + ) + if expected_tags == []: + assert not actual_tags, f"Expected there is no tags, got {actual_tags}" + assert len(expected_tags) == len(actual_tags) + for tag in expected_tags: + assert tag in actual_tags, f"Tag {tag} must be in {actual_tags}" + for tag in unexpected_tags: + assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}" + + +@reporter.step_deco("Expected all tags are presented in object") +def check_tags_by_object( + s3_client: S3ClientWrapper, + bucket: str, + key: str, + expected_tags: list, + unexpected_tags: Optional[list] = None, +) -> None: + actual_tags = s3_client.get_object_tagging(bucket, key) + assert_tags( + expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags + ) + + +@reporter.step_deco("Expected all tags are presented in bucket") +def check_tags_by_bucket( + s3_client: S3ClientWrapper, + bucket: str, + expected_tags: list, + unexpected_tags: Optional[list] = None, +) -> None: + actual_tags = s3_client.get_bucket_tagging(bucket) + assert_tags( + expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags + ) + + +def assert_object_lock_mode( + s3_client: S3ClientWrapper, + bucket: str, + file_name: str, + object_lock_mode: str, + retain_until_date: datetime, + legal_hold_status: str = "OFF", + retain_period: Optional[int] = None, +): + object_dict = s3_client.get_object(bucket, file_name, full_output=True) + assert ( + object_dict.get("ObjectLockMode") == object_lock_mode + ), f"Expected Object Lock Mode is {object_lock_mode}" + assert ( + object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status + ), f"Expected Object Lock Legal Hold Status is {legal_hold_status}" + object_retain_date = object_dict.get("ObjectLockRetainUntilDate") + retain_date = ( + parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date + ) + if retain_until_date: + assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_until_date.strftime( + "%Y-%m-%dT%H:%M:%S" + ), f'Expected Object Lock Retain Until Date is {str(retain_until_date.strftime("%Y-%m-%dT%H:%M:%S"))}' + elif retain_period: + last_modify_date = object_dict.get("LastModified") + last_modify = ( + parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date + ) + assert ( + retain_date - last_modify + timedelta(seconds=1) + ).days == retain_period, f"Expected retention period is {retain_period} days" + + +def assert_s3_acl(acl_grants: list, permitted_users: str): + if permitted_users == "AllUsers": + grantees = {"AllUsers": 0, "CanonicalUser": 0} + for acl_grant in acl_grants: + if acl_grant.get("Grantee", {}).get("Type") == "Group": + uri = acl_grant.get("Grantee", {}).get("URI") + permission = acl_grant.get("Permission") + assert (uri, permission) == ( + "http://acs.amazonaws.com/groups/global/AllUsers", + "FULL_CONTROL", + ), "All Groups should have FULL_CONTROL" + 
grantees["AllUsers"] += 1 + if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser": + permission = acl_grant.get("Permission") + assert permission == "FULL_CONTROL", "Canonical User should have FULL_CONTROL" + grantees["CanonicalUser"] += 1 + assert grantees["AllUsers"] >= 1, "All Users should have FULL_CONTROL" + assert grantees["CanonicalUser"] >= 1, "Canonical User should have FULL_CONTROL" + + if permitted_users == "CanonicalUser": + for acl_grant in acl_grants: + if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser": + permission = acl_grant.get("Permission") + assert permission == "FULL_CONTROL", "Only CanonicalUser should have FULL_CONTROL" + else: + logger.error("FULL_CONTROL is given to All Users") + + +@reporter.step_deco("Init S3 Credentials") +def init_s3_credentials( + wallet_path: str, + cluster: Cluster, + s3_bearer_rules_file: str, + policy: Optional[dict] = None, +): + bucket = str(uuid.uuid4()) + + s3gate_node = cluster.services(S3Gate)[0] + gate_public_key = s3gate_node.get_wallet_public_key() + cmd = ( + f"{FROSTFS_AUTHMATE_EXEC} --debug --with-log --timeout {CREDENTIALS_CREATE_TIMEOUT} " + f"issue-secret --wallet {wallet_path} --gate-public-key={gate_public_key} " + f"--peer {cluster.default_rpc_endpoint} --container-friendly-name {bucket} " + f"--bearer-rules {s3_bearer_rules_file}" + ) + if policy: + cmd += f" --container-policy {policy}'" + logger.info(f"Executing command: {cmd}") + + try: + output = _run_with_passwd(cmd) + logger.info(f"Command completed with output: {output}") + + # output contains some debug info and then several JSON structures, so we find each + # JSON structure by curly brackets (naive approach, but works while JSON is not nested) + # and then we take JSON containing secret_access_key + json_blocks = re.findall(r"\{.*?\}", output, re.DOTALL) + for json_block in json_blocks: + try: + parsed_json_block = json.loads(json_block) + if "secret_access_key" in parsed_json_block: + return ( + parsed_json_block["container_id"], + parsed_json_block["access_key_id"], + parsed_json_block["secret_access_key"], + ) + except json.JSONDecodeError: + raise AssertionError(f"Could not parse info from output\n{output}") + raise AssertionError(f"Could not find AWS credentials in output:\n{output}") + + except Exception as exc: + raise RuntimeError(f"Failed to init s3 credentials because of error\n{exc}") from exc + + +@reporter.step_deco("Delete bucket with all objects") +def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): + versioning_status = s3_client.get_bucket_versioning_status(bucket) + if versioning_status == VersioningStatus.ENABLED.value: + # From versioned bucket we should delete all versions and delete markers of all objects + objects_versions = s3_client.list_objects_versions(bucket) + if objects_versions: + s3_client.delete_object_versions_without_dm(bucket, objects_versions) + objects_delete_markers = s3_client.list_delete_markers(bucket) + if objects_delete_markers: + s3_client.delete_object_versions_without_dm(bucket, objects_delete_markers) + + else: + # From non-versioned bucket it's sufficient to delete objects by key + objects = s3_client.list_objects(bucket) + if objects: + s3_client.delete_objects(bucket, objects) + objects_delete_markers = s3_client.list_delete_markers(bucket) + if objects_delete_markers: + s3_client.delete_object_versions_without_dm(bucket, objects_delete_markers) + + # Delete the bucket itself + s3_client.delete_bucket(bucket) diff --git a/src/frostfs_testlib/steps/s3_helper.py 
b/src/frostfs_testlib/steps/s3_helper.py deleted file mode 100644 index c3092df..0000000 --- a/src/frostfs_testlib/steps/s3_helper.py +++ /dev/null @@ -1,209 +0,0 @@ -import logging -import os -from datetime import datetime, timedelta -from typing import Optional - -from dateutil.parser import parse - -from frostfs_testlib import reporter -from frostfs_testlib.clients.s3 import BucketContainerResolver, S3ClientWrapper, VersioningStatus -from frostfs_testlib.shell import Shell -from frostfs_testlib.steps.cli.container import search_nodes_with_container -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo - -logger = logging.getLogger("NeoLogger") - - -@reporter.step("Expected all objects are presented in the bucket") -def check_objects_in_bucket( - s3_client: S3ClientWrapper, - bucket: str, - expected_objects: list, - unexpected_objects: Optional[list] = None, -) -> None: - unexpected_objects = unexpected_objects or [] - bucket_objects = s3_client.list_objects(bucket) - assert len(bucket_objects) == len(expected_objects), f"Expected {len(expected_objects)} objects in the bucket" - for bucket_object in expected_objects: - assert bucket_object in bucket_objects, f"Expected object {bucket_object} in objects list {bucket_objects}" - - for bucket_object in unexpected_objects: - assert bucket_object not in bucket_objects, f"Expected object {bucket_object} not in objects list {bucket_objects}" - - -@reporter.step("Try to get object and got error") -def try_to_get_objects_and_expect_error(s3_client: S3ClientWrapper, bucket: str, object_keys: list) -> None: - for obj in object_keys: - try: - s3_client.get_object(bucket, obj) - raise AssertionError(f"Object {obj} found in bucket {bucket}") - except Exception as err: - assert "The specified key does not exist" in str(err), f"Expected error in exception {err}" - - -@reporter.step("Set versioning status to '{status}' for bucket '{bucket}'") -def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus): - if status == VersioningStatus.UNDEFINED: - return - - s3_client.put_bucket_versioning(bucket, status=status) - bucket_status = s3_client.get_bucket_versioning_status(bucket) - assert bucket_status == status.value, f"Expected {bucket_status} status. 
Got {status.value}" - - -def object_key_from_file_path(full_path: str) -> str: - return os.path.basename(full_path) - - -def assert_tags(actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None) -> None: - expected_tags = [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else [] - unexpected_tags = [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else [] - if expected_tags == []: - assert not actual_tags, f"Expected there is no tags, got {actual_tags}" - assert len(expected_tags) == len(actual_tags) - for tag in expected_tags: - assert tag in actual_tags, f"Tag {tag} must be in {actual_tags}" - for tag in unexpected_tags: - assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}" - - -@reporter.step("Expected all tags are presented in object") -def check_tags_by_object( - s3_client: S3ClientWrapper, - bucket: str, - key: str, - expected_tags: list, - unexpected_tags: Optional[list] = None, -) -> None: - actual_tags = s3_client.get_object_tagging(bucket, key) - assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags) - - -@reporter.step("Expected all tags are presented in bucket") -def check_tags_by_bucket( - s3_client: S3ClientWrapper, - bucket: str, - expected_tags: list, - unexpected_tags: Optional[list] = None, -) -> None: - actual_tags = s3_client.get_bucket_tagging(bucket) - assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags) - - -def assert_object_lock_mode( - s3_client: S3ClientWrapper, - bucket: str, - file_name: str, - object_lock_mode: str, - retain_until_date: datetime, - legal_hold_status: str = "OFF", - retain_period: Optional[int] = None, -): - object_dict = s3_client.get_object(bucket, file_name, full_output=True) - assert object_dict.get("ObjectLockMode") == object_lock_mode, f"Expected Object Lock Mode is {object_lock_mode}" - assert ( - object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status - ), f"Expected Object Lock Legal Hold Status is {legal_hold_status}" - object_retain_date = object_dict.get("ObjectLockRetainUntilDate") - retain_date = parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date - if retain_until_date: - assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_until_date.strftime( - "%Y-%m-%dT%H:%M:%S" - ), f'Expected Object Lock Retain Until Date is {str(retain_until_date.strftime("%Y-%m-%dT%H:%M:%S"))}' - elif retain_period: - last_modify_date = object_dict.get("LastModified") - last_modify = parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date - assert ( - retain_date - last_modify + timedelta(seconds=1) - ).days == retain_period, f"Expected retention period is {retain_period} days" - - -def _format_grants_as_strings(grants: list[dict]) -> list: - grantee_format = "{g_type}::{uri}:{permission}" - return set( - [ - grantee_format.format( - g_type=grant.get("Grantee", {}).get("Type", ""), - uri=grant.get("Grantee", {}).get("URI", ""), - permission=grant.get("Permission", ""), - ) - for grant in grants - ] - ) - - -@reporter.step("Verify ACL permissions") -def verify_acl_permissions(actual_acl_grants: list[dict], expected_acl_grants: list[dict], strict: bool = True): - actual_grants = _format_grants_as_strings(actual_acl_grants) - expected_grants = _format_grants_as_strings(expected_acl_grants) - - assert expected_grants <= actual_grants, "Permissions mismatch" - if 
strict: - assert expected_grants == actual_grants, "Extra permissions found, must not be there" - - -@reporter.step("Delete bucket with all objects") -def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): - versioning_status = s3_client.get_bucket_versioning_status(bucket) - if versioning_status == VersioningStatus.ENABLED.value: - # From versioned bucket we should delete all versions and delete markers of all objects - objects_versions = s3_client.list_objects_versions(bucket) - if objects_versions: - s3_client.delete_object_versions_without_dm(bucket, objects_versions) - objects_delete_markers = s3_client.list_delete_markers(bucket) - if objects_delete_markers: - s3_client.delete_object_versions_without_dm(bucket, objects_delete_markers) - - else: - # From non-versioned bucket it's sufficient to delete objects by key - objects = s3_client.list_objects(bucket) - if objects: - s3_client.delete_objects(bucket, objects) - objects_delete_markers = s3_client.list_delete_markers(bucket) - if objects_delete_markers: - s3_client.delete_object_versions_without_dm(bucket, objects_delete_markers) - - # Delete the bucket itself - s3_client.delete_bucket(bucket) - - -@reporter.step("Search nodes bucket") -def search_nodes_with_bucket( - cluster: Cluster, - bucket_name: str, - wallet: WalletInfo, - shell: Shell, - endpoint: str, - bucket_container_resolver: BucketContainerResolver, -) -> list[ClusterNode]: - cid = None - for cluster_node in cluster.cluster_nodes: - cid = bucket_container_resolver.resolve(cluster_node, bucket_name) - if cid: - break - nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) - return nodes_list - - -def get_bytes_relative_to_object(value: int | str, object_size: int = None, part_size: int = None) -> int: - if isinstance(value, int): - return value - - if "part" not in value and "object" not in value: - return int(value) - - if object_size is not None: - value = value.replace("object", str(object_size)) - - if part_size is not None: - value = value.replace("part", str(part_size)) - - return int(eval(value)) - - -def get_range_relative_to_object(rng: str, object_size: int = None, part_size: int = None, int_values: bool = False) -> str | int: - start, end = rng.split(":") - start = get_bytes_relative_to_object(start, object_size, part_size) - end = get_bytes_relative_to_object(end, object_size, part_size) - return (start, end) if int_values else f"bytes {start}-{end}/*" diff --git a/src/frostfs_testlib/steps/session_token.py b/src/frostfs_testlib/steps/session_token.py index 67c556d..14e25f1 100644 --- a/src/frostfs_testlib/steps/session_token.py +++ b/src/frostfs_testlib/steps/session_token.py @@ -4,18 +4,19 @@ import logging import os import uuid from dataclasses import dataclass +from enum import Enum from typing import Any, Optional -from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC -from frostfs_testlib.resources.common import ASSETS_DIR +from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.testing.readable import HumanReadableEnum from frostfs_testlib.utils import json_utils, wallet_utils +reporter = get_reporter() 
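# Illustrative sketch of the byte-range resolution performed by the removed
# get_bytes_relative_to_object()/get_range_relative_to_object() helpers above:
# the tokens "object" and "part" are substituted with the corresponding sizes
# and the remaining arithmetic is evaluated. This standalone variant avoids
# eval() and only supports "+"/"-" expressions; the name is illustrative and
# not part of the testlib API.
def resolve_relative_bytes(value: int | str, object_size: int | None = None, part_size: int | None = None) -> int:
    if isinstance(value, int):
        return value
    expr = value.replace("object", str(object_size)).replace("part", str(part_size))
    # naive sum of signed integer terms, e.g. "1024-256" -> 768
    return sum(int(term) for term in expr.replace("-", "+-").split("+") if term)

# resolve_relative_bytes("object-part", object_size=1024, part_size=256) == 768,
# which the removed range helper would render as "bytes 0-768/*" style strings.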
logger = logging.getLogger("NeoLogger") UNRELATED_KEY = "unrelated key in the session" @@ -25,7 +26,7 @@ WRONG_VERB = "wrong verb of the session" INVALID_SIGNATURE = "invalid signature of the session data" -class ObjectVerb(HumanReadableEnum): +class ObjectVerb(Enum): PUT = "PUT" DELETE = "DELETE" GET = "GET" @@ -35,7 +36,7 @@ class ObjectVerb(HumanReadableEnum): SEARCH = "SEARCH" -class ContainerVerb(HumanReadableEnum): +class ContainerVerb(Enum): CREATE = "PUT" DELETE = "DELETE" SETEACL = "SETEACL" @@ -48,7 +49,7 @@ class Lifetime: iat: int = 0 -@reporter.step("Generate Session Token") +@reporter.step_deco("Generate Session Token") def generate_session_token( owner_wallet: WalletInfo, session_wallet: WalletInfo, @@ -70,7 +71,9 @@ def generate_session_token( file_path = os.path.join(tokens_dir, str(uuid.uuid4())) - pub_key_64 = wallet_utils.get_wallet_public_key(session_wallet.path, session_wallet.password, "base64") + pub_key_64 = wallet_utils.get_wallet_public_key( + session_wallet.path, session_wallet.password, "base64" + ) lifetime = lifetime or Lifetime() @@ -95,7 +98,7 @@ def generate_session_token( return file_path -@reporter.step("Generate Session Token For Container") +@reporter.step_deco("Generate Session Token For Container") def generate_container_session_token( owner_wallet: WalletInfo, session_wallet: WalletInfo, @@ -122,7 +125,11 @@ def generate_container_session_token( "container": { "verb": verb.value, "wildcard": cid is None, - **({"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}} if cid is not None else {}), + **( + {"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}} + if cid is not None + else {} + ), }, } @@ -135,7 +142,7 @@ def generate_container_session_token( ) -@reporter.step("Generate Session Token For Object") +@reporter.step_deco("Generate Session Token For Object") def generate_object_session_token( owner_wallet: WalletInfo, session_wallet: WalletInfo, @@ -177,7 +184,7 @@ def generate_object_session_token( ) -@reporter.step("Get signed token for container session") +@reporter.step_deco("Get signed token for container session") def get_container_signed_token( owner_wallet: WalletInfo, user_wallet: WalletInfo, @@ -199,7 +206,7 @@ def get_container_signed_token( return sign_session_token(shell, session_token_file, owner_wallet) -@reporter.step("Get signed token for object session") +@reporter.step_deco("Get signed token for object session") def get_object_signed_token( owner_wallet: WalletInfo, user_wallet: WalletInfo, @@ -226,11 +233,12 @@ def get_object_signed_token( return sign_session_token(shell, session_token_file, owner_wallet) -@reporter.step("Create Session Token") +@reporter.step_deco("Create Session Token") def create_session_token( shell: Shell, owner: str, - wallet: WalletInfo, + wallet_path: str, + wallet_password: str, rpc_endpoint: str, ) -> str: """ @@ -245,18 +253,19 @@ def create_session_token( The path to the generated session token file. 
""" session_token = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC) frostfscli.session.create( rpc_endpoint=rpc_endpoint, address=owner, + wallet=wallet_path, + wallet_password=wallet_password, out=session_token, - wallet=wallet.path, ) return session_token -@reporter.step("Sign Session Token") -def sign_session_token(shell: Shell, session_token_file: str, wallet: WalletInfo) -> str: +@reporter.step_deco("Sign Session Token") +def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) -> str: """ This function signs the session token by the given wallet. @@ -269,6 +278,10 @@ def sign_session_token(shell: Shell, session_token_file: str, wallet: WalletInfo The path to the signed token. """ signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) - frostfscli.util.sign_session_token(session_token_file, signed_token_file) + frostfscli = FrostfsCli( + shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG + ) + frostfscli.util.sign_session_token( + wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file + ) return signed_token_file diff --git a/src/frostfs_testlib/steps/storage_object.py b/src/frostfs_testlib/steps/storage_object.py index 4b4b2a6..7776754 100644 --- a/src/frostfs_testlib/steps/storage_object.py +++ b/src/frostfs_testlib/steps/storage_object.py @@ -3,7 +3,7 @@ from time import sleep import pytest -from frostfs_testlib import reporter +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import delete_object, get_object @@ -12,13 +12,16 @@ from frostfs_testlib.steps.tombstone import verify_head_tombstone from frostfs_testlib.storage.cluster import Cluster from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo +reporter = get_reporter() logger = logging.getLogger("NeoLogger") CLEANUP_TIMEOUT = 10 -@reporter.step("Delete Objects") -def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster) -> None: +@reporter.step_deco("Delete Objects") +def delete_objects( + storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster +) -> None: """ Deletes given storage objects. 
@@ -30,14 +33,14 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, clust with reporter.step("Delete objects"): for storage_object in storage_objects: storage_object.tombstone = delete_object( - storage_object.wallet, + storage_object.wallet_file_path, storage_object.cid, storage_object.oid, shell=shell, endpoint=cluster.default_rpc_endpoint, ) verify_head_tombstone( - wallet=storage_object.wallet, + wallet_path=storage_object.wallet_file_path, cid=storage_object.cid, oid_ts=storage_object.tombstone, oid=storage_object.oid, @@ -52,7 +55,7 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, clust for storage_object in storage_objects: with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED): get_object( - storage_object.wallet, + storage_object.wallet_file_path, storage_object.cid, storage_object.oid, shell=shell, diff --git a/src/frostfs_testlib/steps/storage_policy.py b/src/frostfs_testlib/steps/storage_policy.py index acc113f..eca25d2 100644 --- a/src/frostfs_testlib/steps/storage_policy.py +++ b/src/frostfs_testlib/steps/storage_policy.py @@ -6,21 +6,22 @@ """ import logging -from frostfs_testlib import reporter +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import head_object from frostfs_testlib.steps.complex_object_actions import get_last_object from frostfs_testlib.storage.cluster import StorageNode -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.utils import string_utils +reporter = get_reporter() logger = logging.getLogger("NeoLogger") -# TODO: Unused, remove or make use of -@reporter.step("Get Object Copies") -def get_object_copies(complexity: str, wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: +@reporter.step_deco("Get Object Copies") +def get_object_copies( + complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] +) -> int: """ The function performs requests to all nodes of the container and finds out if they store a copy of the object. The procedure is @@ -44,8 +45,10 @@ def get_object_copies(complexity: str, wallet: WalletInfo, cid: str, oid: str, s ) -@reporter.step("Get Simple Object Copies") -def get_simple_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: +@reporter.step_deco("Get Simple Object Copies") +def get_simple_object_copies( + wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] +) -> int: """ To figure out the number of a simple object copies, only direct HEAD requests should be made to the every node of the container. 
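# A usage sketch tying get_simple_object_copies() above to a container
# placement policy: for a container created with a "REP 2 ..." rule one would
# expect exactly two direct HEAD hits across the cluster's storage nodes.
# wallet, cid, oid, shell and cluster are assumed test fixtures.
from frostfs_testlib.steps.storage_policy import get_simple_object_copies
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode

copies = get_simple_object_copies(wallet, cid, oid, shell=shell, nodes=cluster.services(StorageNode))
assert copies == 2, f"Expected 2 copies for a REP 2 container, got {copies}"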
@@ -63,7 +66,9 @@ def get_simple_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shel copies = 0 for node in nodes: try: - response = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True) + response = head_object( + wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True + ) if response: logger.info(f"Found object {oid} on node {node}") copies += 1 @@ -73,8 +78,10 @@ def get_simple_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shel return copies -@reporter.step("Get Complex Object Copies") -def get_complex_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: +@reporter.step_deco("Get Complex Object Copies") +def get_complex_object_copies( + wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] +) -> int: """ To figure out the number of a complex object copies, we firstly need to retrieve its Last object. We consider that the number of @@ -95,8 +102,10 @@ def get_complex_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: She return get_simple_object_copies(wallet, cid, last_oid, shell, nodes) -@reporter.step("Get Nodes With Object") -def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]: +@reporter.step_deco("Get Nodes With Object") +def get_nodes_with_object( + cid: str, oid: str, shell: Shell, nodes: list[StorageNode] +) -> list[StorageNode]: """ The function returns list of nodes which store the given object. @@ -111,7 +120,8 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN nodes_list = [] for node in nodes: - wallet = WalletInfo.from_node(node) + wallet = node.get_wallet_path() + wallet_config = node.get_wallet_config_path() try: res = head_object( wallet, @@ -120,6 +130,7 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True, + wallet_config=wallet_config, ) if res is not None: logger.info(f"Found object {oid} on node {node}") @@ -130,8 +141,10 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN return nodes_list -@reporter.step("Get Nodes Without Object") -def get_nodes_without_object(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]: +@reporter.step_deco("Get Nodes Without Object") +def get_nodes_without_object( + wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] +) -> list[StorageNode]: """ The function returns list of nodes which do not store the given object. 
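# A small consistency sketch for the two lookups above: for any stored object,
# get_nodes_with_object() and get_nodes_without_object() should partition the
# set of storage nodes. wallet, cid, oid, shell and nodes are assumed fixtures.
from frostfs_testlib.steps.storage_policy import get_nodes_with_object, get_nodes_without_object

with_object = get_nodes_with_object(cid, oid, shell=shell, nodes=nodes)
without_object = get_nodes_without_object(wallet, cid, oid, shell=shell, nodes=nodes)
assert len(with_object) + len(without_object) == len(nodes), "Each node either stores the object or it does not"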
@@ -147,7 +160,9 @@ def get_nodes_without_object(wallet: WalletInfo, cid: str, oid: str, shell: Shel nodes_list = [] for node in nodes: try: - res = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True) + res = head_object( + wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True + ) if res is None: nodes_list.append(node) except Exception as err: diff --git a/src/frostfs_testlib/steps/tombstone.py b/src/frostfs_testlib/steps/tombstone.py index 27f75d5..a46cf77 100644 --- a/src/frostfs_testlib/steps/tombstone.py +++ b/src/frostfs_testlib/steps/tombstone.py @@ -1,24 +1,41 @@ +import json import logging -from frostfs_testlib import reporter +from neo3.wallet import wallet + +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import head_object -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step("Verify Head Tombstone") -def verify_head_tombstone(wallet: WalletInfo, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str): - header = head_object(wallet, cid, oid_ts, shell=shell, endpoint=endpoint)["header"] +@reporter.step_deco("Verify Head Tombstone") +def verify_head_tombstone( + wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str +): + header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"] s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"] logger.info(f"Header Session OIDs is {s_oid}") logger.info(f"OID is {oid}") assert header["containerID"] == cid, "Tombstone Header CID is wrong" - assert header["ownerID"] == wallet.get_address_from_json(0), "Tombstone Owner ID is wrong" + + with open(wallet_path, "r") as file: + wlt_data = json.loads(file.read()) + wlt = wallet.Wallet.from_json(wlt_data, password="") + addr = wlt.accounts[0].address + + assert header["ownerID"] == addr, "Tombstone Owner ID is wrong" assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone" - assert header["sessionToken"]["body"]["object"]["verb"] == "DELETE", "Header Session Type isn't DELETE" - assert header["sessionToken"]["body"]["object"]["target"]["container"] == cid, "Header Session ID is wrong" - assert oid in header["sessionToken"]["body"]["object"]["target"]["objects"], "Header Session OID is wrong" + assert ( + header["sessionToken"]["body"]["object"]["verb"] == "DELETE" + ), "Header Session Type isn't DELETE" + assert ( + header["sessionToken"]["body"]["object"]["target"]["container"] == cid + ), "Header Session ID is wrong" + assert ( + oid in header["sessionToken"]["body"]["object"]["target"]["objects"] + ), "Header Session OID is wrong" diff --git a/src/frostfs_testlib/storage/__init__.py b/src/frostfs_testlib/storage/__init__.py index cbbef84..531964c 100644 --- a/src/frostfs_testlib/storage/__init__.py +++ b/src/frostfs_testlib/storage/__init__.py @@ -1,7 +1,25 @@ +from frostfs_testlib.storage.constants import _FrostfsServicesNames +from frostfs_testlib.storage.dataclasses.frostfs_services import ( + HTTPGate, + InnerRing, + MainChain, + MorphChain, + S3Gate, + StorageNode, +) from frostfs_testlib.storage.service_registry import ServiceRegistry __class_registry = ServiceRegistry() +# Register default public services +__class_registry.register_service(_FrostfsServicesNames.STORAGE, StorageNode) +__class_registry.register_service(_FrostfsServicesNames.INNER_RING, 
InnerRing) +__class_registry.register_service(_FrostfsServicesNames.MORPH_CHAIN, MorphChain) +__class_registry.register_service(_FrostfsServicesNames.S3_GATE, S3Gate) +__class_registry.register_service(_FrostfsServicesNames.HTTP_GATE, HTTPGate) +# # TODO: Remove this since we are no longer have main chain +__class_registry.register_service(_FrostfsServicesNames.MAIN_CHAIN, MainChain) + def get_service_registry() -> ServiceRegistry: """Returns registry with registered classes related to cluster and cluster nodes. diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index b67e34d..2158dc2 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -2,18 +2,19 @@ import random import re import yaml -from yarl import URL -from frostfs_testlib import reporter from frostfs_testlib.hosting import Host, Hosting from frostfs_testlib.hosting.config import ServiceConfig from frostfs_testlib.storage import get_service_registry -from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml from frostfs_testlib.storage.constants import ConfigAttributes -from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode -from frostfs_testlib.storage.dataclasses.metrics import Metrics +from frostfs_testlib.storage.dataclasses.frostfs_services import ( + HTTPGate, + InnerRing, + MorphChain, + S3Gate, + StorageNode, +) from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.service_registry import ServiceRegistry @@ -25,13 +26,11 @@ class ClusterNode: class_registry: ServiceRegistry id: int host: Host - metrics: Metrics def __init__(self, host: Host, id: int) -> None: self.host = host self.id = id self.class_registry = get_service_registry() - self.metrics = Metrics(host=self.host, metrics_endpoint=self.storage_node.get_metrics_endpoint()) @property def host_ip(self): @@ -74,7 +73,6 @@ class ClusterNode: def s3_gate(self) -> S3Gate: return self.service(S3Gate) - # TODO: Deprecated. Use config with ServiceConfigurationYml interface def get_config(self, config_file_path: str) -> dict: shell = self.host.get_shell() @@ -84,16 +82,12 @@ class ClusterNode: config = yaml.safe_load(config_text) return config - # TODO: Deprecated. Use config with ServiceConfigurationYml interface def save_config(self, new_config: dict, config_file_path: str) -> None: shell = self.host.get_shell() config_str = yaml.dump(new_config) shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}") - def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml: - return self.service(service_type).config - def service(self, service_type: type[ServiceClass]) -> ServiceClass: """ Get a service cluster node of specified type. 
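# A usage sketch for the default service registration above: classes registered
# in frostfs_testlib.storage can be looked up through the registry, and their
# hosting service name ("s", "s3-gate", ...) is what ClusterNode and Cluster
# match against hosting config names such as "s01" or "s3-gate01".
# Illustrative only.
from frostfs_testlib.storage import get_service_registry
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode

registry = get_service_registry()
entry = registry.get_entry(StorageNode)
assert entry["hosting_service_name"] == "s"  # registered via _FrostfsServicesNames.STORAGE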
@@ -109,7 +103,7 @@ class ClusterNode: service_entry = self.class_registry.get_entry(service_type) service_name = service_entry["hosting_service_name"] - pattern = f"{service_name}_{self.id:02}" + pattern = f"{service_name}{self.id:02}" config = self.host.get_service_config(pattern) return service_type( @@ -118,42 +112,10 @@ class ClusterNode: self.host, ) - @property - def services(self) -> list[NodeBase]: - svcs: list[NodeBase] = [] - svcs_names_on_node = [svc.name for svc in self.host.config.services] - for entry in self.class_registry._class_mapping.values(): - hosting_svc_name = entry["hosting_service_name"] - pattern = f"{hosting_svc_name}_{self.id:02}" - if pattern in svcs_names_on_node: - config = self.host.get_service_config(pattern) - svcs.append( - entry["cls"]( - self.id, - config.name, - self.host, - ) - ) - - return svcs - - def get_all_interfaces(self) -> dict[str, str]: - return self.host.config.interfaces - - def get_interface(self, interface: Interfaces) -> str: - return self.host.config.interfaces[interface.value] - - def get_data_interfaces(self) -> list[str]: - return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface] - - def get_data_interface(self, search_interface: str) -> list[str]: - return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_interface == interface] - - def get_internal_interfaces(self) -> list[str]: - return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "internal" in name_interface] - - def get_internal_interface(self, search_internal: str) -> list[str]: - return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_internal == interface] + def get_list_of_services(self) -> list[str]: + return [ + config.attributes[ConfigAttributes.SERVICE_NAME] for config in self.host.config.services + ] class Cluster: @@ -203,40 +165,6 @@ class Cluster: def morph_chain(self) -> list[MorphChain]: return self.services(MorphChain) - def nodes(self, services: list[ServiceClass]) -> list[ClusterNode]: - """ - Resolve which cluster nodes hosting the specified services. - - Args: - services: list of services to resolve hosting cluster nodes. - - Returns: - list of cluster nodes which host specified services. - """ - - cluster_nodes = set() - for service in services: - cluster_nodes.update([node for node in self.cluster_nodes if node.service(type(service)) == service]) - - return list(cluster_nodes) - - def node(self, service: ServiceClass) -> ClusterNode: - """ - Resolve single cluster node hosting the specified service. - - Args: - services: list of services to resolve hosting cluster nodes. - - Returns: - list of cluster nodes which host specified services. - """ - - nodes = [node for node in self.cluster_nodes if node.service(type(service)) == service] - if not len(nodes): - raise RuntimeError(f"Cannot find service {service} on any node") - - return nodes[0] - def services(self, service_type: type[ServiceClass]) -> list[ServiceClass]: """ Get all services in a cluster of specified type. @@ -253,13 +181,13 @@ class Cluster: service_name = service["hosting_service_name"] cls: type[NodeBase] = service["cls"] - pattern = f"{service_name}_\d*$" + pattern = f"{service_name}\d*$" configs = self.hosting.find_service_configs(pattern) found_nodes = [] for config in configs: # config.name is something like s3-gate01. 
Cut last digits to know service type - service_type = re.findall("(.*)_\d+", config.name)[0] + service_type = re.findall(".*\D", config.name)[0] # exclude unsupported services if service_type != service_name: continue @@ -322,8 +250,3 @@ class Cluster: def get_morph_endpoints(self) -> list[str]: nodes: list[MorphChain] = self.services(MorphChain) return [node.get_endpoint() for node in nodes] - - def get_nodes_by_ip(self, ips: list[str]) -> list[ClusterNode]: - cluster_nodes = [node for node in self.cluster_nodes if URL(node.morph_chain.get_endpoint()).host in ips] - with reporter.step(f"Return cluster nodes - {cluster_nodes}"): - return cluster_nodes diff --git a/src/frostfs_testlib/storage/configuration/interfaces.py b/src/frostfs_testlib/storage/configuration/interfaces.py deleted file mode 100644 index b2bc683..0000000 --- a/src/frostfs_testlib/storage/configuration/interfaces.py +++ /dev/null @@ -1,65 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any - - -class ServiceConfigurationYml(ABC): - """ - Class to manipulate yml configuration for service - """ - - def _find_option(self, key: str, data: dict): - tree = key.split(":") - current = data - for node in tree: - if isinstance(current, list) and len(current) - 1 >= int(node): - current = current[int(node)] - continue - - if node not in current: - return None - - current = current[node] - - return current - - def _set_option(self, key: str, value: Any, data: dict): - tree = key.split(":") - current = data - for node in tree[:-1]: - if isinstance(current, list) and len(current) - 1 >= int(node): - current = current[int(node)] - continue - - if node not in current: - current[node] = {} - - current = current[node] - - current[tree[-1]] = value - - @abstractmethod - def get(self, key: str) -> str: - """ - Get parameter value from current configuration - - Args: - key: key of the parameter in yaml format like 'storage:shard:default:resync_metabase' - - Returns: - value of the parameter - """ - - @abstractmethod - def set(self, values: dict[str, Any]): - """ - Sets parameters to configuration - - Args: - values: dict where key is the key of the parameter in yaml format like 'storage:shard:default:resync_metabase' and value is the value of the option to set - """ - - @abstractmethod - def revert(self): - """ - Revert changes - """ diff --git a/src/frostfs_testlib/storage/configuration/service_configuration.py b/src/frostfs_testlib/storage/configuration/service_configuration.py deleted file mode 100644 index fddd64a..0000000 --- a/src/frostfs_testlib/storage/configuration/service_configuration.py +++ /dev/null @@ -1,88 +0,0 @@ -import os -import re -from typing import Any - -import yaml - -from frostfs_testlib import reporter -from frostfs_testlib.shell.interfaces import CommandOptions, Shell -from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml - - -def extend_dict(extend_me: dict, extend_by: dict): - if isinstance(extend_by, dict): - for k, v in extend_by.items(): - if k in extend_me: - extend_dict(extend_me.get(k), v) - else: - extend_me[k] = v - else: - extend_me += extend_by - - -class ServiceConfiguration(ServiceConfigurationYml): - def __init__(self, service_name: str, shell: Shell, config_dir: str, main_config_path: str) -> None: - self.service_name = service_name - self.shell = shell - self.main_config_path = main_config_path - self.confd_path = os.path.join(config_dir, "conf.d") - self.custom_file = os.path.join(self.confd_path, "99_changes.yml") - - def _path_exists(self, path: 
str) -> bool: - return not self.shell.exec(f"test -e {path}", options=CommandOptions(check=False)).return_code - - def _get_config_files(self): - config_files = [self.main_config_path] - - if self._path_exists(self.confd_path): - files = self.shell.exec(f"find {self.confd_path} -type f").stdout.strip().split() - # Sorting files in backwards order from latest to first one - config_files.extend(sorted(files, key=lambda x: -int(re.findall("^\d+", os.path.basename(x))[0]))) - - return config_files - - def _get_configuration(self, config_files: list[str]) -> dict: - if not config_files: - return [{}] - - splitter = "+++++" - files_str = " ".join(config_files) - all_content = self.shell.exec( - f"echo Getting config files; for file in {files_str}; do (echo {splitter}; sudo cat ${{file}}); done" - ).stdout - files_content = all_content.split("+++++")[1:] - files_data = [yaml.safe_load(file_content) for file_content in files_content] - - mergedData = {} - for data in files_data: - extend_dict(mergedData, data) - - return mergedData - - def get(self, key: str) -> str | Any: - with reporter.step(f"Get {key} configuration value for {self.service_name}"): - config_files = self._get_config_files() - configuration = self._get_configuration(config_files) - result = self._find_option(key, configuration) - return result - - def set(self, values: dict[str, Any]): - with reporter.step(f"Change configuration for {self.service_name}"): - if not self._path_exists(self.confd_path): - self.shell.exec(f"mkdir {self.confd_path}") - - if self._path_exists(self.custom_file): - data = self._get_configuration([self.custom_file]) - else: - data = {} - - for key, value in values.items(): - self._set_option(key, value, data) - - content = yaml.dump(data) - self.shell.exec(f"echo '{content}' | sudo tee {self.custom_file}") - self.shell.exec(f"chmod 777 {self.custom_file}") - - def revert(self): - with reporter.step(f"Revert changed options for {self.service_name}"): - self.shell.exec(f"rm -rf {self.custom_file}") diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 2e49208..95ea3f2 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -3,28 +3,20 @@ class ConfigAttributes: WALLET_PASSWORD = "wallet_password" WALLET_PATH = "wallet_path" WALLET_CONFIG = "wallet_config" - CONFIG_DIR = "service_config_dir" CONFIG_PATH = "config_path" - WORKING_DIR = "working_dir" - SHARD_CONFIG_PATH = "shard_config_path" - LOGGER_CONFIG_PATH = "logger_config_path" LOCAL_WALLET_PATH = "local_wallet_path" - LOCAL_WALLET_CONFIG = "local_wallet_config_path" - REMOTE_WALLET_CONFIG = "remote_wallet_config_path" + LOCAL_WALLET_CONFIG = "local_config_path" ENDPOINT_DATA_0 = "endpoint_data0" ENDPOINT_DATA_1 = "endpoint_data1" - ENDPOINT_DATA_0_NS = "endpoint_data0_namespace" ENDPOINT_INTERNAL = "endpoint_internal0" - ENDPOINT_PROMETHEUS = "endpoint_prometheus" - ENDPOINT_PPROF = "endpoint_pprof" CONTROL_ENDPOINT = "control_endpoint" UN_LOCODE = "un_locode" -class PlacementRule: - DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" - SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" - REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" - REP_1_FOR_2_NODES_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 2 FROM * AS X" - DEFAULT_EC_PLACEMENT_RULE = "EC 3.1" - EC_1_1_FOR_2_NODES_PLACEMENT_RULE = "EC 1.1 IN X CBF 1 SELECT 2 FROM * AS X" +class _FrostfsServicesNames: + STORAGE = "s" + S3_GATE = "s3-gate" + HTTP_GATE 
= "http-gate" + MORPH_CHAIN = "morph-chain" + INNER_RING = "ir" + MAIN_CHAIN = "main-chain" diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index 5628282..a2336be 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -1,54 +1,84 @@ import copy -from datetime import datetime +import time import frostfs_testlib.resources.optionals as optionals -from frostfs_testlib import reporter -from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner -from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType +from frostfs_testlib.load.k6 import K6 +from frostfs_testlib.load.load_config import ( + EndpointSelectionStrategy, + K6ProcessAllocationStrategy, + LoadParams, + LoadScenario, + LoadType, +) from frostfs_testlib.load.load_report import LoadReport +from frostfs_testlib.load.load_steps import init_s3_client, prepare_k6_instances from frostfs_testlib.load.load_verifiers import LoadVerifier +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.load_params import ( + K6_TEARDOWN_PERIOD, + LOAD_NODE_SSH_PASSWORD, + LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, + LOAD_NODE_SSH_PRIVATE_KEY_PATH, + LOAD_NODE_SSH_USER, + LOAD_NODES, +) +from frostfs_testlib.shell.interfaces import SshCredentials from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode -from frostfs_testlib.testing.parallel import parallel +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.test_control import run_optionally +from frostfs_testlib.utils import datetime_utils + +reporter = get_reporter() class BackgroundLoadController: + k6_instances: list[K6] k6_dir: str load_params: LoadParams original_load_params: LoadParams + load_nodes: list[str] verification_params: LoadParams - cluster_nodes: list[ClusterNode] nodes_under_load: list[ClusterNode] load_counter: int + ssh_credentials: SshCredentials + loaders_wallet: WalletInfo load_summaries: dict endpoints: list[str] - runner: ScenarioRunner - started: bool - load_reporters: list[LoadReport] def __init__( self, k6_dir: str, load_params: LoadParams, - cluster_nodes: list[ClusterNode], + loaders_wallet: WalletInfo, nodes_under_load: list[ClusterNode], - runner: ScenarioRunner, ) -> None: self.k6_dir = k6_dir self.original_load_params = load_params self.load_params = copy.deepcopy(self.original_load_params) - self.cluster_nodes = cluster_nodes self.nodes_under_load = nodes_under_load self.load_counter = 1 - self.runner = runner - self.started = False - self.load_reporters = [] + self.load_nodes = LOAD_NODES + self.loaders_wallet = loaders_wallet + if load_params.endpoint_selection_strategy is None: raise RuntimeError("endpoint_selection_strategy should not be None") + self.endpoints = self._get_endpoints( + load_params.load_type, load_params.endpoint_selection_strategy + ) + + self.ssh_credentials = SshCredentials( + LOAD_NODE_SSH_USER, + LOAD_NODE_SSH_PASSWORD, + LOAD_NODE_SSH_PRIVATE_KEY_PATH, + LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, + ) + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, []) - def _get_endpoints(self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy): + def _get_endpoints( + self, load_type: LoadType, 
endpoint_selection_strategy: EndpointSelectionStrategy + ): all_endpoints = { LoadType.gRPC: { EndpointSelectionStrategy.ALL: list( @@ -59,20 +89,26 @@ class BackgroundLoadController: ) ), EndpointSelectionStrategy.FIRST: list( - set(node_under_load.service(StorageNode).get_rpc_endpoint() for node_under_load in self.nodes_under_load) + set( + node_under_load.service(StorageNode).get_rpc_endpoint() + for node_under_load in self.nodes_under_load + ) ), }, # for some reason xk6 appends http protocol on its own LoadType.S3: { EndpointSelectionStrategy.ALL: list( set( - endpoint + endpoint.replace("http://", "") for node_under_load in self.nodes_under_load for endpoint in node_under_load.service(S3Gate).get_all_endpoints() ) ), EndpointSelectionStrategy.FIRST: list( - set(node_under_load.service(S3Gate).get_endpoint() for node_under_load in self.nodes_under_load) + set( + node_under_load.service(S3Gate).get_endpoint().replace("http://", "") + for node_under_load in self.nodes_under_load + ) ), }, } @@ -80,37 +116,69 @@ class BackgroundLoadController: return all_endpoints[load_type][endpoint_selection_strategy] @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Init k6 instances") - def init_k6(self): - self.endpoints = self._get_endpoints(self.load_params.load_type, self.load_params.endpoint_selection_strategy) - self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir) - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Prepare load instances") + @reporter.step_deco("Prepare background load instances") def prepare(self): - self.runner.prepare(self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir) - self.init_k6() + if self.load_params.load_type == LoadType.S3: + init_s3_client( + self.load_nodes, + self.load_params, + self.k6_dir, + self.ssh_credentials, + self.nodes_under_load, + self.loaders_wallet, + ) - def append_reporter(self, load_report: LoadReport): - self.load_reporters.append(load_report) + self._prepare(self.load_params) + + def _prepare(self, load_params: LoadParams): + self.k6_instances = prepare_k6_instances( + load_nodes=LOAD_NODES, + ssh_credentials=self.ssh_credentials, + k6_dir=self.k6_dir, + load_params=load_params, + endpoints=self.endpoints, + loaders_wallet=self.loaders_wallet, + ) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step_deco("Start background load") def start(self): - with reporter.step(f"Start load on nodes {self.nodes_under_load}"): - self.runner.start() - self.started = True + if self.load_params.preset is None: + raise RuntimeError("Preset should not be none at the moment of start") + + with reporter.step( + f"Start background load on nodes {self.nodes_under_load}: " + f"writers = {self.load_params.writers}, " + f"obj_size = {self.load_params.object_size}, " + f"load_time = {self.load_params.load_time}, " + f"prepare_json = {self.load_params.preset.pregen_json}, " + f"endpoints = {self.endpoints}" + ): + for k6_load_instance in self.k6_instances: + k6_load_instance.start() + + wait_after_start_time = datetime_utils.parse_time(self.load_params.setup_timeout) + 5 + with reporter.step( + f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on" + ): + time.sleep(wait_after_start_time) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Stop load") + @reporter.step_deco("Stop background load") def stop(self): - self.runner.stop() + for k6_load_instance in self.k6_instances: + 
k6_load_instance.stop() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, True) - def is_running(self) -> bool: - return self.runner.is_running + def is_running(self): + for k6_load_instance in self.k6_instances: + if not k6_load_instance.is_running: + return False + + return True @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Reset load") + @reporter.step_deco("Reset background load") def _reset_for_consequent_load(self): """This method is required if we want to run multiple loads during test run. Raise load counter by 1 and append it to load_id @@ -120,106 +188,89 @@ class BackgroundLoadController: self.load_params.set_id(f"{self.load_params.load_id}_{self.load_counter}") @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Startup load") + @reporter.step_deco("Startup background load") def startup(self): self.prepare() - self.preset() self.start() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - def preset(self): - self.runner.preset() - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Stop and get results of load") - def teardown(self): - if not self.started: + @reporter.step_deco("Stop and get results of background load") + def teardown(self, load_report: LoadReport = None): + if not self.k6_instances: return self.stop() - self.load_summaries = self._get_results() - self.started = False - - start_time = min(self._get_start_times()) - end_time = max(self._get_end_times()) - - for load_report in self.load_reporters: - load_report.set_start_time(start_time) - load_report.set_end_time(end_time) + self.load_summaries = self.get_results() + self.k6_instances = [] + if load_report: load_report.add_summaries(self.load_summaries) - def _get_start_times(self) -> list[datetime]: - futures = parallel([k6.get_start_time for k6 in self.runner.get_k6_instances()]) - return [future.result() for future in futures] - - def _get_end_times(self) -> list[datetime]: - futures = parallel([k6.get_end_time for k6 in self.runner.get_k6_instances()]) - return [future.result() for future in futures] - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Run post-load verification") + @reporter.step_deco("Verify results of background load") def verify(self): try: - load_issues = self._collect_load_issues() if self.load_params.verify: - load_issues.extend(self._run_verify_scenario()) - - assert not load_issues, "\n".join(load_issues) + self.verification_params = LoadParams( + verify_clients=self.load_params.verify_clients, + scenario=LoadScenario.VERIFY, + registry_file=self.load_params.registry_file, + verify_time=self.load_params.verify_time, + load_type=self.load_params.load_type, + load_id=self.load_params.load_id, + working_dir=self.load_params.working_dir, + endpoint_selection_strategy=self.load_params.endpoint_selection_strategy, + k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy, + ) + self._run_verify_scenario() + verification_summaries = self.get_results() + self.verify_summaries(self.load_summaries, verification_summaries) finally: self._reset_for_consequent_load() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Collect load issues") - def _collect_load_issues(self): + @reporter.step_deco("Verify summaries from k6") + def verify_summaries(self, load_summaries: dict, verification_summaries: dict): verifier = LoadVerifier(self.load_params) - return verifier.collect_load_issues(self.load_summaries) + for 
node_or_endpoint in load_summaries: + with reporter.step(f"Verify load summaries for {node_or_endpoint}"): + verifier.verify_summaries( + load_summaries[node_or_endpoint], verification_summaries[node_or_endpoint] + ) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - def wait_until_finish(self, soft_timeout: int = 0): - self.runner.wait_until_finish(soft_timeout) + def wait_until_finish(self): + if self.load_params.load_time is None: + raise RuntimeError("LoadTime should not be none") + + for k6_instance in self.k6_instances: + k6_instance.wait_until_finished(self.load_params.load_time + int(K6_TEARDOWN_PERIOD)) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step("Verify loaded objects") - def _run_verify_scenario(self) -> list[str]: - self.verification_params = LoadParams( - verify_clients=self.load_params.verify_clients, - scenario=LoadScenario.VERIFY, - read_from=self.load_params.read_from, - registry_file=self.load_params.registry_file, - verify_time=self.load_params.verify_time, - custom_registry=self.load_params.custom_registry, - load_type=self.load_params.load_type, - load_id=self.load_params.load_id, - vu_init_time=0, - working_dir=self.load_params.working_dir, - endpoint_selection_strategy=self.load_params.endpoint_selection_strategy, - k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy, - setup_timeout=self.load_params.setup_timeout, - ) - - if self.verification_params.custom_registry: - self.verification_params.registry_file = self.load_params.custom_registry - + @reporter.step_deco("Run verify scenario for background load") + def _run_verify_scenario(self): if self.verification_params.verify_time is None: raise RuntimeError("verify_time should not be none") - self.runner.init_k6_instances(self.verification_params, self.endpoints, self.k6_dir) - with reporter.step("Run verify scenario"): - self.runner.start() - self.runner.wait_until_finish() - - with reporter.step("Collect verify issues"): - verification_summaries = self._get_results() - verifier = LoadVerifier(self.load_params) - return verifier.collect_verify_issues(self.load_summaries, verification_summaries) + self._prepare(self.verification_params) + with reporter.step("Run verify background load data"): + for k6_verify_instance in self.k6_instances: + k6_verify_instance.start() + k6_verify_instance.wait_until_finished(self.verification_params.verify_time) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - def _get_results(self) -> dict: - with reporter.step(f"Get {self.load_params.scenario.value} scenario results"): - return self.runner.get_results() + @reporter.step_deco("K6 run results") + def get_results(self) -> dict: + results = {} + for k6_instance in self.k6_instances: + if k6_instance.load_params.k6_process_allocation_strategy is None: + raise RuntimeError("k6_process_allocation_strategy should not be none") - def __str__(self) -> str: - return self.load_params.__str__() + result = k6_instance.get_results() + keys_map = { + K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.load_node, + K6ProcessAllocationStrategy.PER_ENDPOINT: k6_instance.endpoints[0], + } + key = keys_map[k6_instance.load_params.k6_process_allocation_strategy] + results[key] = result - def __repr__(self) -> str: - return repr(self.load_params) + return results diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 51aaefb..705caf0 100644 --- 
a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,481 +1,165 @@ -import itertools -import logging import time -from datetime import datetime, timezone -from typing import TypeVar +from concurrent.futures import ThreadPoolExecutor import frostfs_testlib.resources.optionals as optionals -from frostfs_testlib import reporter -from frostfs_testlib.cli import FrostfsAdm, FrostfsCli -from frostfs_testlib.cli.netmap_parser import NetmapParser -from frostfs_testlib.healthcheck.interfaces import Healthcheck -from frostfs_testlib.hosting.interfaces import HostStatus -from frostfs_testlib.plugins import load_all -from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC -from frostfs_testlib.resources.common import MORPH_BLOCK_TIME -from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider -from frostfs_testlib.steps.network import IpHelper -from frostfs_testlib.steps.node_management import include_node_to_network_map, remove_nodes_from_map_morph -from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.shell import CommandOptions, Shell +from frostfs_testlib.steps import epoch +from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController -from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeStatus -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.testing import parallel -from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success -from frostfs_testlib.utils.datetime_utils import parse_time +from frostfs_testlib.testing.test_control import run_optionally, wait_for_success +from frostfs_testlib.utils.failover_utils import ( + wait_all_storage_nodes_returned, + wait_for_host_offline, + wait_for_host_online, + wait_for_node_online, +) -logger = logging.getLogger("NeoLogger") - - -class StateManager: - def __init__(self, cluster_state_controller: "ClusterStateController") -> None: - self.csc = cluster_state_controller - - -StateManagerClass = TypeVar("StateManagerClass", bound=StateManager) +reporter = get_reporter() class ClusterStateController: - def __init__(self, shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> None: + def __init__(self, shell: Shell, cluster: Cluster) -> None: self.stopped_nodes: list[ClusterNode] = [] self.detached_disks: dict[str, DiskController] = {} - self.dropped_traffic: set[ClusterNode] = set() - self.excluded_from_netmap: list[StorageNode] = [] - self.stopped_services: set[NodeBase] = set() + self.stopped_storage_nodes: list[ClusterNode] = [] self.cluster = cluster - self.healthcheck = healthcheck self.shell = shell - self.suspended_services: dict[str, list[ClusterNode]] = {} - self.nodes_with_modified_interface: list[ClusterNode] = [] - self.managers: list[StateManagerClass] = [] - - # TODO: move all functionality to managers - managers = set(load_all(group="frostfs.testlib.csc_managers")) - for manager in managers: - self.managers.append(manager(self)) - - def manager(self, manager_type: type[StateManagerClass]) -> StateManagerClass: - for manager in self.managers: - # Subclasses here for the future if we have overriding 
subclasses of base interface - if issubclass(type(manager), manager_type): - return manager - - def _get_stopped_by_node(self, node: ClusterNode) -> set[NodeBase]: - stopped_by_node = [svc for svc in self.stopped_services if svc.host == node.host] - return set(stopped_by_node) - - def _get_stopped_by_type(self, service_type: type[ServiceClass]) -> set[ServiceClass]: - stopped_by_type = [svc for svc in self.stopped_services if isinstance(svc, service_type)] - return set(stopped_by_type) - - def _from_stopped_nodes(self, service_type: type[ServiceClass]) -> set[ServiceClass]: - stopped_on_nodes = set([node.service(service_type) for node in self.stopped_nodes]) - return set(stopped_on_nodes) - - def _get_online(self, service_type: type[ServiceClass]) -> set[ServiceClass]: - stopped_svc = self._get_stopped_by_type(service_type).union(self._from_stopped_nodes(service_type)) - online_svc = set(self.cluster.services(service_type)) - stopped_svc - return online_svc @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop host of node {node}") + @reporter.step_deco("Stop host of node {node}") def stop_node_host(self, node: ClusterNode, mode: str): - # Drop ssh connection for this node before shutdown - provider = SshConnectionProvider() - provider.drop(node.host_ip) - - self.stopped_nodes.append(node) with reporter.step(f"Stop host {node.host.config.address}"): node.host.stop_host(mode=mode) - self._wait_for_host_offline(node) + wait_for_host_offline(self.shell, node.storage_node) + self.stopped_nodes.append(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Shutdown whole cluster") + @reporter.step_deco("Shutdown whole cluster") def shutdown_cluster(self, mode: str, reversed_order: bool = False): - nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes - - # Drop all ssh connections before shutdown - provider = SshConnectionProvider() - provider.drop_all() - + nodes = ( + reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + ) for node in nodes: with reporter.step(f"Stop host {node.host.config.address}"): self.stopped_nodes.append(node) node.host.stop_host(mode=mode) for node in nodes: - self._wait_for_host_offline(node) + wait_for_host_offline(self.shell, node.storage_node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start host of node {node}") - def start_node_host(self, node: ClusterNode, startup_healthcheck: bool = True): + @reporter.step_deco("Stop all storage services on cluster") + def stop_all_storage_services(self, reversed_order: bool = False): + nodes = ( + reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + ) + + for node in nodes: + self.stop_storage_service(node) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start host of node {node}") + def start_node_host(self, node: ClusterNode): with reporter.step(f"Start host {node.host.config.address}"): node.host.start_host() - self._wait_for_host_online(node) - self.stopped_nodes.remove(node) - if startup_healthcheck: - self.wait_startup_healthcheck() + wait_for_host_online(self.shell, node.storage_node) + wait_for_node_online(node.storage_node) + self.stopped_nodes.remove(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start stopped hosts") + @reporter.step_deco("Start stopped hosts") def start_stopped_hosts(self, reversed_order: bool = False): - if not self.stopped_nodes: - return - nodes = 
reversed(self.stopped_nodes) if reversed_order else self.stopped_nodes for node in nodes: with reporter.step(f"Start host {node.host.config.address}"): node.host.start_host() - self.stopped_services.difference_update(self._get_stopped_by_node(node)) - self.stopped_nodes = [] - with reporter.step("Wait for all nodes to go online"): - parallel(self._wait_for_host_online, self.cluster.cluster_nodes) - - self.wait_after_storage_startup() + wait_all_storage_nodes_returned(self.shell, self.cluster) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Detach disk {device} at {mountpoint} on node {node}") + @reporter.step_deco("Detach disk {device} at {mountpoint} on node {node}") def detach_disk(self, node: StorageNode, device: str, mountpoint: str): disk_controller = self._get_disk_controller(node, device, mountpoint) self.detached_disks[disk_controller.id] = disk_controller disk_controller.detach() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Attach disk {device} at {mountpoint} on node {node}") + @reporter.step_deco("Attach disk {device} at {mountpoint} on node {node}") def attach_disk(self, node: StorageNode, device: str, mountpoint: str): disk_controller = self._get_disk_controller(node, device, mountpoint) disk_controller.attach() self.detached_disks.pop(disk_controller.id, None) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Restore detached disks") + @reporter.step_deco("Restore detached disks") def restore_disks(self): for disk_controller in self.detached_disks.values(): disk_controller.attach() self.detached_disks = {} @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop all {service_type} services") - def stop_services_of_type(self, service_type: type[ServiceClass], mask: bool = True): - services = self.cluster.services(service_type) - self.stopped_services.update(services) - parallel([service.stop_service for service in services], mask=mask) + @reporter.step_deco("Stop storage service on {node}") + def stop_storage_service(self, node: ClusterNode): + node.storage_node.stop_service() + self.stopped_storage_nodes.append(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start all {service_type} services") - def start_services_of_type(self, service_type: type[ServiceClass]): - services = self.cluster.services(service_type) - parallel([service.start_service for service in services]) - self.stopped_services.difference_update(set(services)) - - if service_type == StorageNode: - self.wait_after_storage_startup() - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Send sighup to all {service_type} services") - def sighup_services_of_type(self, service_type: type[ServiceClass]): - services = self.cluster.services(service_type) - parallel([service.send_signal_to_service for service in services], signal="SIGHUP") - - if service_type == StorageNode: - self.wait_after_storage_startup() - - @wait_for_success(600, 60) - def wait_s3gate(self, s3gate: S3Gate): - with reporter.step(f"Wait for {s3gate} reconnection"): - result = s3gate.get_metric("frostfs_s3_gw_pool_current_nodes") - assert 'address="127.0.0.1' in result.stdout, "S3Gate should connect to local storage node" - - @reporter.step("Wait for S3Gates reconnection to local storage") - def wait_s3gates(self): - online_s3gates = self._get_online(S3Gate) - if online_s3gates: - parallel(self.wait_s3gate, online_s3gates) - - @reporter.step("Wait for cluster startup healtcheck") - def 
wait_startup_healthcheck(self): - nodes = self.cluster.nodes(self._get_online(StorageNode)) - parallel(self.healthcheck.startup_healthcheck, nodes) - - @reporter.step("Wait for storage reconnection to the system") - def wait_after_storage_startup(self): - self.wait_startup_healthcheck() - self.wait_s3gates() - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start all stopped services") - def start_all_stopped_services(self): - stopped_storages = self._get_stopped_by_type(StorageNode) - parallel([service.start_service for service in self.stopped_services]) - self.stopped_services.clear() - - if stopped_storages: - self.wait_after_storage_startup() - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop {service_type} service on {node}") - def stop_service_of_type(self, node: ClusterNode, service_type: ServiceClass, mask: bool = True): - service = node.service(service_type) - service.stop_service(mask) - self.stopped_services.add(service) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Send sighup to {service_type} service on {node}") - def sighup_service_of_type(self, node: ClusterNode, service_type: ServiceClass): - service = node.service(service_type) - service.send_signal_to_service("SIGHUP") - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start {service_type} service on {node}") - def start_service_of_type(self, node: ClusterNode, service_type: ServiceClass): - service = node.service(service_type) - service.start_service() - self.stopped_services.discard(service) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start all stopped {service_type} services") - def start_stopped_services_of_type(self, service_type: ServiceClass): - stopped_svc = self._get_stopped_by_type(service_type) - if not stopped_svc: - return - - parallel([svc.start_service for svc in stopped_svc]) - self.stopped_services.difference_update(stopped_svc) - - if service_type == StorageNode: - self.wait_after_storage_startup() - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Restart {service_type} service on {node}") - def restart_service_of_type(self, node: ClusterNode, service_type: ServiceClass): - service = node.service(service_type) - service.restart_service() - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Restart all {service_type} services") - def restart_services_of_type(self, service_type: type[ServiceClass]): - services = self.cluster.services(service_type) - parallel([service.restart_service for service in services]) - - if service_type == StorageNode: - self.wait_after_storage_startup() - - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop storage service on {node}") - def stop_storage_service(self, node: ClusterNode, mask: bool = True): - self.stop_service_of_type(node, StorageNode, mask) - - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start storage service on {node}") + @reporter.step_deco("Start storage service on {node}") def start_storage_service(self, node: ClusterNode): - self.start_service_of_type(node, StorageNode) + node.storage_node.start_service() + self.stopped_storage_nodes.remove(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Suspend {process_name} service in {node}") - def suspend_service(self, process_name: str, node: ClusterNode): - node.host.wait_success_suspend_process(process_name) - if 
self.suspended_services.get(process_name): - self.suspended_services[process_name].append(node) - else: - self.suspended_services[process_name] = [node] + @reporter.step_deco("Start stopped storage services") + def start_stopped_storage_services(self): + if self.stopped_storage_nodes: + # In case if we stopped couple services, for example (s01-s04): + # After starting only s01, it may require connections to s02-s04, which is still down, and fail to start. + # Also, if something goes wrong here, we might skip s02-s04 start at all, and cluster will be left in a bad state. + # So in order to make sure that services are at least attempted to be started, using threads here. + with ThreadPoolExecutor(max_workers=len(self.stopped_storage_nodes)) as executor: + start_result = executor.map(self.start_storage_service, self.stopped_storage_nodes) + + # Looks tricky, but if exception is raised in any thread, it will be "eaten" by ThreadPoolExecutor, + # But will be thrown here. + # Not ideal solution, but okay for now + for _ in start_result: + pass + + wait_all_storage_nodes_returned(self.shell, self.cluster) + self.stopped_storage_nodes = [] @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Resume {process_name} service in {node}") - def resume_service(self, process_name: str, node: ClusterNode): - node.host.wait_success_resume_process(process_name) - if self.suspended_services.get(process_name) and node in self.suspended_services[process_name]: - self.suspended_services[process_name].remove(node) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start suspend processes services") - def resume_suspended_services(self): - for process_name, list_nodes in self.suspended_services.items(): - [node.host.wait_success_resume_process(process_name) for node in list_nodes] - self.suspended_services = {} - - @reporter.step("Drop traffic to {node}, nodes - {block_nodes}") - def drop_traffic(self, node: ClusterNode, wakeup_timeout: int, name_interface: str, block_nodes: list[ClusterNode] = None) -> None: - interfaces_tables = self._parse_interfaces(block_nodes, name_interface) - IpHelper.drop_input_traffic_to_node(node, interfaces_tables) - time.sleep(wakeup_timeout) - self.dropped_traffic.add(node) - - @reporter.step("Start traffic to {node}") - def restore_traffic(self, node: ClusterNode) -> None: - IpHelper.restore_input_traffic_to_node(node=node) - self.dropped_traffic.discard(node) - - @reporter.step("Restore blocked nodes") - def restore_all_traffic(self): - if not self.dropped_traffic: - return - parallel(self._restore_traffic_to_node, self.dropped_traffic) - self.dropped_traffic.clear() - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Hard reboot host {node} via magic SysRq option") - def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True, startup_healthcheck: bool = True): + @reporter.step_deco("Hard reboot host {node} via magic SysRq option") + def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True): shell = node.host.get_shell() shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"') options = CommandOptions(close_stdin=True, timeout=1, check=False) shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options) - # Drop ssh connection for this node - provider = SshConnectionProvider() - provider.drop(node.host_ip) - if wait_for_return: # Let the things to be settled # A little wait here to prevent ssh stuck during panic time.sleep(10) - self._wait_for_host_online(node) - if 
startup_healthcheck: - self.wait_startup_healthcheck() + wait_for_host_online(self.shell, node.storage_node) + wait_for_node_online(node.storage_node) - @reporter.step("Down {interface} to {nodes}") - def down_interface(self, nodes: list[ClusterNode], interface: str): - for node in nodes: - node.host.down_interface(interface=interface) - assert node.host.check_state(interface=interface) == "DOWN" - self.nodes_with_modified_interface.append(node) - - @reporter.step("Up {interface} to {nodes}") - def up_interface(self, nodes: list[ClusterNode], interface: str): - for node in nodes: - node.host.up_interface(interface=interface) - assert node.host.check_state(interface=interface) == "UP" - if node in self.nodes_with_modified_interface: - self.nodes_with_modified_interface.remove(node) - - @reporter.step("Restore interface") - def restore_interfaces(self): - for node in self.nodes_with_modified_interface: - dict_interfaces = node.host.config.interfaces.keys() - for name_interface in dict_interfaces: - if "mgmt" not in name_interface: - node.host.up_interface(interface=name_interface) - - @reporter.step("Get node time") - def get_node_date(self, node: ClusterNode) -> datetime: - shell = node.host.get_shell() - return datetime.strptime(shell.exec('date +"%Y-%m-%d %H:%M:%S"').stdout.strip(), "%Y-%m-%d %H:%M:%S") - - @reporter.step("Set time on nodes in {in_date}") - def change_date_on_all_nodes(self, cluster: Cluster, in_date: datetime) -> None: - parallel(self.change_node_date, cluster.cluster_nodes, in_date=in_date) - - @reporter.step("Set time on {node} to {in_date}") - def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: - shell = node.host.get_shell() - in_date_frmt = in_date.strftime("%Y-%m-%d %H:%M:%S") - shell.exec(f"timedatectl set-time '{in_date_frmt}'") - node_time = self.get_node_date(node) - - with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): - assert (node_time - in_date).total_seconds() < 60 - - @reporter.step("Restore time on nodes") - def restore_date_on_all_nodes(self, cluster: Cluster) -> None: - parallel(self.restore_node_date, cluster.cluster_nodes) - - @reporter.step("Restore time on {node}") - def restore_node_date(self, node: ClusterNode) -> None: - shell = node.host.get_shell() - now_time = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S") - - with reporter.step(f"Set {now_time} time"): - shell.exec(f"timedatectl set-time '{now_time}'") - - @reporter.step("Set MaintenanceModeAllowed - {status}") - def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: - frostfs_adm = FrostfsAdm( - shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH - ) - frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}") - - @reporter.step("Set node status to {status} in CSC") - def set_node_status(self, cluster_node: ClusterNode, wallet: WalletInfo, status: NodeStatus, await_tick: bool = True) -> None: - rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint() - control_endpoint = cluster_node.service(StorageNode).get_control_endpoint() - - frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(self.shell, wallet, cluster_node) - node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint).stdout) - - if node_netinfo.maintenance_mode_allowed == "false": - with reporter.step("Enable maintenance mode"): - frostfs_adm.morph.set_config("MaintenanceModeAllowed=true") - - with 
reporter.step(f"Set node status to {status} using FrostfsCli"): - frostfs_cli_remote.control.set_status(control_endpoint, status.value) - - if not await_tick: - return - - with reporter.step("Tick 2 epoch with 2 block await."): - for _ in range(2): - frostfs_adm.morph.force_new_epoch() - time.sleep(parse_time(MORPH_BLOCK_TIME) * 2) - - self.await_node_status(status, wallet, cluster_node) - - @wait_for_success(80, 8, title="Wait for node status become {status}") - def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode, checker_node: ClusterNode = None): - frostfs_cli = FrostfsCli(self.shell, FROSTFS_CLI_EXEC, wallet.config_path) - if not checker_node: - checker_node = cluster_node - netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(checker_node.storage_node.get_rpc_endpoint()).stdout) - netmap = [node for node in netmap if cluster_node.get_interface(Interfaces.MGMT) == node.node] - if status == NodeStatus.OFFLINE: + @reporter.step_deco("Wait up to {timeout} seconds for nodes on cluster to align epochs") + def wait_for_epochs_align(self, timeout=60): + @wait_for_success(timeout, 5, None, True) + def check_epochs(): + epochs_by_node = epoch.get_epochs_from_nodes(self.shell, self.cluster) assert ( - cluster_node.get_interface(Interfaces.MGMT) not in netmap - ), f"{cluster_node.get_interface(Interfaces.MGMT)} not in Offline" - else: - assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'" + len(set(epochs_by_node.values())) == 1 + ), f"unaligned epochs found: {epochs_by_node}" - def remove_node_from_netmap(self, removes_nodes: list[StorageNode]) -> None: - alive_storage = list(set(self.cluster.storage_nodes) - set(removes_nodes))[0] - remove_nodes_from_map_morph(self.shell, self.cluster, removes_nodes, alive_storage) - self.excluded_from_netmap.extend(removes_nodes) + check_epochs() - def include_node_to_netmap(self, include_node: StorageNode, alive_node: StorageNode): - include_node_to_network_map(include_node, alive_node, self.shell, self.cluster) - self.excluded_from_netmap.pop(self.excluded_from_netmap.index(include_node)) - - def include_all_excluded_nodes(self): - if not self.excluded_from_netmap: - return - alive_node = list(set(self.cluster.storage_nodes) - set(self.excluded_from_netmap))[0] - if not alive_node: - return - - for exclude_node in self.excluded_from_netmap.copy(): - self.include_node_to_netmap(exclude_node, alive_node) - - def _get_cli( - self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode - ) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]: - # TODO Move to service config - host = cluster_node.host - service_config = host.get_service_config(cluster_node.storage_node.name) - wallet_path = service_config.attributes["wallet_path"] - wallet_password = service_config.attributes["wallet_password"] - - shell = host.get_shell() - wallet_config_path = f"/tmp/{cluster_node.storage_node.name}-config.yaml" - wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' - shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") - - frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) - frostfs_cli = FrostfsCli(local_shell, FROSTFS_CLI_EXEC, local_wallet.config_path) - frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path) - return frostfs_adm, frostfs_cli, frostfs_cli_remote - - def _get_disk_controller(self, node: 
StorageNode, device: str, mountpoint: str) -> DiskController: + def _get_disk_controller( + self, node: StorageNode, device: str, mountpoint: str + ) -> DiskController: disk_controller_id = DiskController.get_id(node, device) if disk_controller_id in self.detached_disks.keys(): disk_controller = self.detached_disks[disk_controller_id] @@ -483,61 +167,3 @@ class ClusterStateController: disk_controller = DiskController(node, device, mountpoint) return disk_controller - - @reporter.step("Restore traffic {node}") - def _restore_traffic_to_node(self, node): - IpHelper.restore_input_traffic_to_node(node) - - def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str) -> list[tuple]: - interfaces_and_tables = set() - for node in nodes: - shell = node.host.get_shell() - lines = shell.exec(f"ip r l table all | grep '{name_interface}'").stdout.splitlines() - - ips = [] - tables = [] - - for line in lines: - if "src" not in line or "table local" in line: - continue - parts = line.split() - ips.append(parts[-1]) - if "table" in line: - tables.append(parts[parts.index("table") + 1]) - tables.append(None) - - [interfaces_and_tables.add((ip, table)) for ip, table in itertools.product(ips, tables)] - - return interfaces_and_tables - - @reporter.step("Ping node") - def _ping_host(self, node: ClusterNode): - options = CommandOptions(check=False) - return self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code - - @retry(max_attempts=60, sleep_interval=10, expected_result=HostStatus.ONLINE, title="Waiting for {node} to go online") - def _wait_for_host_online(self, node: ClusterNode): - try: - ping_result = self._ping_host(node) - if ping_result != 0: - return HostStatus.OFFLINE - return node.host.get_host_status() - except Exception as err: - logger.warning(f"Host ping fails with error {err}") - return HostStatus.OFFLINE - - @retry(max_attempts=60, sleep_interval=10, expected_result=HostStatus.OFFLINE, title="Waiting for {node} to go offline") - def _wait_for_host_offline(self, node: ClusterNode): - try: - ping_result = self._ping_host(node) - if ping_result == 0: - return HostStatus.ONLINE - return node.host.get_host_status() - except Exception as err: - logger.warning(f"Host ping fails with error {err}") - return HostStatus.ONLINE - - @reporter.step("Get contract by domain - {domain_name}") - def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str): - frostfs_adm = FrostfsAdm(shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC) - return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_http_endpoint(), domain_name).stdout diff --git a/src/frostfs_testlib/storage/controllers/shards_watcher.py b/src/frostfs_testlib/storage/controllers/shards_watcher.py index 5017406..6607824 100644 --- a/src/frostfs_testlib/storage/controllers/shards_watcher.py +++ b/src/frostfs_testlib/storage/controllers/shards_watcher.py @@ -2,22 +2,22 @@ import json from typing import Any from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards -from frostfs_testlib.shell.interfaces import CommandResult from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.testing.test_control import wait_for_success class ShardsWatcher: + shards_snapshots: list[dict[str, Any]] = [] + def __init__(self, node_under_test: ClusterNode) -> None: - self.shards_snapshots: list[dict[str, Any]] = [] self.storage_node = node_under_test.storage_node self.take_shards_snapshot() - def take_shards_snapshot(self) -> None: + def 
take_shards_snapshot(self): snapshot = self.get_shards_snapshot() self.shards_snapshots.append(snapshot) - def get_shards_snapshot(self) -> dict[str, Any]: + def get_shards_snapshot(self): shards_snapshot: dict[str, Any] = {} shards = self.get_shards() @@ -26,17 +26,17 @@ class ShardsWatcher: return shards_snapshot - def _get_current_snapshot(self) -> dict[str, Any]: + def _get_current_snapshot(self): return self.shards_snapshots[-1] - def _get_previous_snapshot(self) -> dict[str, Any]: + def _get_previous_snapshot(self): return self.shards_snapshots[-2] - def _is_shard_present(self, shard_id) -> bool: + def _is_shard_present(self, shard_id): snapshot = self._get_current_snapshot() return shard_id in snapshot - def get_shards_with_new_errors(self) -> dict[str, Any]: + def get_shards_with_new_errors(self): current_snapshot = self._get_current_snapshot() previous_snapshot = self._get_previous_snapshot() shards_with_new_errors: dict[str, Any] = {} @@ -46,7 +46,7 @@ class ShardsWatcher: return shards_with_new_errors - def get_shards_with_errors(self) -> dict[str, Any]: + def get_shards_with_errors(self): snapshot = self.get_shards_snapshot() shards_with_errors: dict[str, Any] = {} for shard_id, shard in snapshot.items(): @@ -55,7 +55,7 @@ class ShardsWatcher: return shards_with_errors - def get_shard_status(self, shard_id: str): # -> Any: + def get_shard_status(self, shard_id: str): snapshot = self.get_shards_snapshot() assert shard_id in snapshot, f"Shard {shard_id} is missing: {snapshot}" @@ -63,26 +63,28 @@ class ShardsWatcher: return snapshot[shard_id]["mode"] @wait_for_success(60, 2) - def await_for_all_shards_status(self, status: str) -> None: + def await_for_all_shards_status(self, status: str): snapshot = self.get_shards_snapshot() for shard_id in snapshot: assert snapshot[shard_id]["mode"] == status, f"Shard {shard_id} have wrong shard status" @wait_for_success(60, 2) - def await_for_shard_status(self, shard_id: str, status: str) -> None: + def await_for_shard_status(self, shard_id: str, status: str): assert self.get_shard_status(shard_id) == status @wait_for_success(60, 2) - def await_for_shard_have_new_errors(self, shard_id: str) -> None: + def await_for_shard_have_new_errors(self, shard_id: str): self.take_shards_snapshot() assert self._is_shard_present(shard_id) shards_with_new_errors = self.get_shards_with_new_errors() - assert shard_id in shards_with_new_errors, f"Expected shard {shard_id} to have new errors, but haven't {self.shards_snapshots[-1]}" + assert ( + shard_id in shards_with_new_errors + ), f"Expected shard {shard_id} to have new errors, but haven't {self.shards_snapshots[-1]}" @wait_for_success(300, 5) - def await_for_shards_have_no_new_errors(self) -> None: + def await_for_shards_have_no_new_errors(self): self.take_shards_snapshot() shards_with_new_errors = self.get_shards_with_new_errors() assert len(shards_with_new_errors) == 0 @@ -97,20 +99,19 @@ class ShardsWatcher: endpoint=self.storage_node.get_control_endpoint(), wallet=self.storage_node.get_remote_wallet_path(), wallet_password=self.storage_node.get_wallet_password(), - json_mode=True, ) return json.loads(response.stdout.split(">", 1)[1]) - def set_shard_mode(self, shard_id: str, mode: str, clear_errors: bool = True) -> CommandResult: + def set_shard_mode(self, shard_id: str, mode: str, clear_errors: bool = True): shards_cli = FrostfsCliShards( self.storage_node.host.get_shell(), self.storage_node.host.get_cli_config("frostfs-cli").exec_path, ) return shards_cli.set_mode( - 
endpoint=self.storage_node.get_control_endpoint(), - wallet=self.storage_node.get_remote_wallet_path(), - wallet_password=self.storage_node.get_wallet_password(), + self.storage_node.get_control_endpoint(), + self.storage_node.get_remote_wallet_path(), + self.storage_node.get_wallet_password(), mode=mode, id=[shard_id], clear_errors=clear_errors, diff --git a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py deleted file mode 100644 index f0b2a21..0000000 --- a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py +++ /dev/null @@ -1,62 +0,0 @@ -from typing import Any - -from frostfs_testlib import reporter -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController, StateManager -from frostfs_testlib.storage.dataclasses.node_base import ServiceClass -from frostfs_testlib.testing import parallel - - -class ConfigStateManager(StateManager): - def __init__(self, cluster_state_controller: ClusterStateController) -> None: - super().__init__(cluster_state_controller) - self.services_with_changed_config: set[tuple[ClusterNode, ServiceClass]] = set() - self.cluster = self.csc.cluster - - @reporter.step("Change configuration for {service_type} on all nodes") - def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any], sighup: bool = False): - services = self.cluster.services(service_type) - nodes = self.cluster.nodes(services) - self.services_with_changed_config.update([(node, service_type) for node in nodes]) - - if not sighup: - self.csc.stop_services_of_type(service_type) - - parallel([node.config(service_type).set for node in nodes], values=values) - if not sighup: - self.csc.start_services_of_type(service_type) - else: - self.csc.sighup_services_of_type(service_type) - - @reporter.step("Change configuration for {service_type} on {node}") - def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]): - self.services_with_changed_config.add((node, service_type)) - - self.csc.stop_service_of_type(node, service_type) - node.config(service_type).set(values) - self.csc.start_service_of_type(node, service_type) - - @reporter.step("Revert all configuration changes") - def revert_all(self, sighup: bool = False): - if not self.services_with_changed_config: - return - - parallel(self._revert_svc, self.services_with_changed_config, sighup) - self.services_with_changed_config.clear() - - if not sighup: - self.csc.start_all_stopped_services() - - # TODO: parallel can't have multiple parallel_items :( - @reporter.step("Revert all configuration {node_and_service}") - def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass], sighup: bool = False): - node, service_type = node_and_service - service = node.service(service_type) - - if not sighup: - self.csc.stop_service_of_type(node, service_type) - - node.config(service_type).revert() - - if sighup: - service.send_signal_to_service("SIGHUP") diff --git a/src/frostfs_testlib/storage/dataclasses/acl.py b/src/frostfs_testlib/storage/dataclasses/acl.py index 362dee9..cceb4d8 100644 --- a/src/frostfs_testlib/storage/dataclasses/acl.py +++ b/src/frostfs_testlib/storage/dataclasses/acl.py @@ -1,9 +1,8 @@ import logging from dataclasses import dataclass +from enum import Enum from typing import Any, Dict, List, Optional, Union -from 
frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.testing.readable import HumanReadableEnum from frostfs_testlib.utils import wallet_utils logger = logging.getLogger("NeoLogger") @@ -11,7 +10,7 @@ EACL_LIFETIME = 100500 FROSTFS_CONTRACT_CACHE_TIMEOUT = 30 -class EACLOperation(HumanReadableEnum): +class EACLOperation(Enum): PUT = "put" GET = "get" HEAD = "head" @@ -21,24 +20,24 @@ class EACLOperation(HumanReadableEnum): DELETE = "delete" -class EACLAccess(HumanReadableEnum): +class EACLAccess(Enum): ALLOW = "allow" DENY = "deny" -class EACLRole(HumanReadableEnum): +class EACLRole(Enum): OTHERS = "others" USER = "user" SYSTEM = "system" -class EACLHeaderType(HumanReadableEnum): +class EACLHeaderType(Enum): REQUEST = "req" # Filter request headers OBJECT = "obj" # Filter object headers SERVICE = "SERVICE" # Filter service headers. These are not processed by FrostFS nodes and exist for service use only -class EACLMatchType(HumanReadableEnum): +class EACLMatchType(Enum): STRING_EQUAL = "=" # Return true if strings are equal STRING_NOT_EQUAL = "!=" # Return true if strings are different @@ -65,7 +64,11 @@ class EACLFilters: def __str__(self): return ",".join( - [f"{filter.header_type.value}:" f"{filter.key}{filter.match_type.value}{filter.value}" for filter in self.filters] + [ + f"{filter.header_type.value}:" + f"{filter.key}{filter.match_type.value}{filter.value}" + for filter in self.filters + ] if self.filters else [] ) @@ -80,7 +83,7 @@ class EACLPubKey: class EACLRule: operation: Optional[EACLOperation] = None access: Optional[EACLAccess] = None - role: Optional[Union[EACLRole, WalletInfo]] = None + role: Optional[Union[EACLRole, str]] = None filters: Optional[EACLFilters] = None def to_dict(self) -> Dict[str, Any]: @@ -92,9 +95,9 @@ class EACLRule: } def __str__(self): - role = "" - if isinstance(self.role, EACLRole): - role = self.role.value - if isinstance(self.role, WalletInfo): - role = f"pubkey:{wallet_utils.get_wallet_public_key(self.role.path, self.role.password)}" + role = ( + self.role.value + if isinstance(self.role, EACLRole) + else f'pubkey:{wallet_utils.get_wallet_public_key(self.role, "")}' + ) return f'{self.access.value} {self.operation.value} {self.filters or ""} {role}' diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py deleted file mode 100644 index 1199435..0000000 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ /dev/null @@ -1,154 +0,0 @@ -import logging -from dataclasses import dataclass -from enum import Enum -from typing import Optional - -from frostfs_testlib.testing.readable import HumanReadableEnum -from frostfs_testlib.utils import string_utils - -logger = logging.getLogger("NeoLogger") -EACL_LIFETIME = 100500 -FROSTFS_CONTRACT_CACHE_TIMEOUT = 30 - - -class ObjectOperations(HumanReadableEnum): - PUT = "object.put" - PATCH = "object.patch" - GET = "object.get" - HEAD = "object.head" - GET_RANGE = "object.range" - GET_RANGE_HASH = "object.hash" - SEARCH = "object.search" - DELETE = "object.delete" - WILDCARD_ALL = "object.*" - - @staticmethod - def get_all(): - return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] - - -class ContainerOperations(HumanReadableEnum): - PUT = "container.put" - GET = "container.get" - LIST = "container.list" - DELETE = "container.delete" - WILDCARD_ALL = "container.*" - - @staticmethod - def get_all(): - return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] - - -@dataclass -class 
Operations: - GET_CONTAINER = "GetContainer" - PUT_CONTAINER = "PutContainer" - DELETE_CONTAINER = "DeleteContainer" - LIST_CONTAINER = "ListContainers" - GET_OBJECT = "GetObject" - DELETE_OBJECT = "DeleteObject" - HASH_OBJECT = "HashObject" - RANGE_OBJECT = "RangeObject" - SEARCH_OBJECT = "SearchObject" - HEAD_OBJECT = "HeadObject" - PUT_OBJECT = "PutObject" - PATCH_OBJECT = "PatchObject" - - -class Verb(HumanReadableEnum): - ALLOW = "allow" - DENY = "deny" - - -class Role(HumanReadableEnum): - OWNER = "owner" - IR = "ir" - CONTAINER = "container" - OTHERS = "others" - - -class ConditionType(HumanReadableEnum): - RESOURCE = "ResourceCondition" - REQUEST = "RequestCondition" - - -# See https://git.frostfs.info/TrueCloudLab/policy-engine/src/branch/master/schema/native/consts.go#L40-L53 -class ConditionKey(HumanReadableEnum): - ROLE = '"\\$Actor:role"' - PUBLIC_KEY = '"\\$Actor:publicKey"' - OBJECT_TYPE = '"\\$Object:objectType"' - OBJECT_ID = '"\\$Object:objectID"' - - -class MatchType(HumanReadableEnum): - EQUAL = "=" - NOT_EQUAL = "!=" - - -@dataclass -class Condition: - condition_key: ConditionKey | str - condition_value: str - condition_type: ConditionType = ConditionType.REQUEST - match_type: MatchType = MatchType.EQUAL - - def as_string(self): - key = self.condition_key.value if isinstance(self.condition_key, ConditionKey) else self.condition_key - value = self.condition_value.value if isinstance(self.condition_value, Enum) else self.condition_value - - return f"{self.condition_type.value}:{key}{self.match_type.value}{value}" - - @staticmethod - def by_role(*args, **kwargs) -> "Condition": - return Condition(ConditionKey.ROLE, *args, **kwargs) - - @staticmethod - def by_key(*args, **kwargs) -> "Condition": - return Condition(ConditionKey.PUBLIC_KEY, *args, **kwargs) - - @staticmethod - def by_object_type(*args, **kwargs) -> "Condition": - return Condition(ConditionKey.OBJECT_TYPE, *args, **kwargs) - - @staticmethod - def by_object_id(*args, **kwargs) -> "Condition": - return Condition(ConditionKey.OBJECT_ID, *args, **kwargs) - - -class Rule: - def __init__( - self, - access: Verb, - operations: list[ObjectOperations] | ObjectOperations, - conditions: list[Condition] | Condition = None, - chain_id: Optional[str] = None, - ) -> None: - self.access = access - self.operations = operations - - if not conditions: - self.conditions = [] - elif isinstance(conditions, Condition): - self.conditions = [conditions] - else: - self.conditions = conditions - - if not isinstance(self.conditions, list): - raise RuntimeError("Conditions must be a list") - - if not operations: - self.operations = [] - elif isinstance(operations, (ObjectOperations, ContainerOperations)): - self.operations = [operations] - else: - self.operations = operations - - if not isinstance(self.operations, list): - raise RuntimeError("Operations must be a list") - - self.chain_id = chain_id if chain_id else string_utils.unique_name("chain-id-") - - def as_string(self): - conditions = " ".join([cond.as_string() for cond in self.conditions]) - operations = " ".join([op.value for op in self.operations]) - return f"{self.access.value} {operations} {conditions} *" diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 4f5c348..7bb4c2b 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -3,7 +3,6 @@ import yaml from frostfs_testlib.blockchain import RPCClient 
from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.storage.dataclasses.node_base import NodeBase -from frostfs_testlib.storage.dataclasses.shard import Shard class InnerRing(NodeBase): @@ -17,8 +16,12 @@ class InnerRing(NodeBase): """ def service_healthcheck(self) -> bool: - health_metric = "frostfs_ir_ir_health" - output = self.host.get_shell().exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d").stdout + health_metric = "frostfs_node_ir_health" + output = ( + self.host.get_shell() + .exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d") + .stdout + ) return health_metric in output def get_netmap_cleaner_threshold(self) -> str: @@ -39,21 +42,19 @@ class S3Gate(NodeBase): def get_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0) - def get_ns_endpoint(self, ns_name: str) -> str: - return self._get_attribute(f"{ConfigAttributes.ENDPOINT_DATA_0}_namespace").format(namespace=ns_name) - def get_all_endpoints(self) -> list[str]: return [ self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0), self._get_attribute(ConfigAttributes.ENDPOINT_DATA_1), ] - def get_ns_endpoint(self, ns_name: str) -> str: - return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0_NS).format(namespace=ns_name) - def service_healthcheck(self) -> bool: health_metric = "frostfs_s3_gw_state_health" - output = self.host.get_shell().exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d").stdout + output = ( + self.host.get_shell() + .exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d") + .stdout + ) return health_metric in output @property @@ -71,7 +72,11 @@ class HTTPGate(NodeBase): def service_healthcheck(self) -> bool: health_metric = "frostfs_http_gw_state_health" - output = self.host.get_shell().exec(f"curl -s localhost:5662 | grep {health_metric} | sed 1,2d").stdout + output = ( + self.host.get_shell() + .exec(f"curl -s localhost:5662 | grep {health_metric} | sed 1,2d") + .stdout + ) return health_metric in output @property @@ -105,8 +110,28 @@ class MorphChain(NodeBase): def label(self) -> str: return f"{self.name}: {self.get_endpoint()}" - def get_http_endpoint(self) -> str: - return self._get_attribute("http_endpoint") + +class MainChain(NodeBase): + """ + Class represents main-chain consensus node in a cluster + + Consensus node is not always the same as physical host: + It can be service running in a container or on physical host (or physical node, if you will): + For testing perspective, it's not relevant how it is actually running, + since frostfs network will still treat it as "node" + """ + + rpc_client: RPCClient + + def construct(self): + self.rpc_client = RPCClient(self.get_endpoint()) + + def get_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_INTERNAL) + + @property + def label(self) -> str: + return f"{self.name}: {self.get_endpoint()}" class StorageNode(NodeBase): @@ -130,47 +155,27 @@ class StorageNode(NodeBase): def service_healthcheck(self) -> bool: health_metric = "frostfs_node_state_health" - output = self.host.get_shell().exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d").stdout + output = ( + self.host.get_shell() + .exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d") + .stdout + ) return health_metric in output - # TODO: Deprecated. Use new approach with config - def get_shard_config_path(self) -> str: - return self._get_attribute(ConfigAttributes.SHARD_CONFIG_PATH) - - # TODO: Deprecated. 
Use new approach with config - def get_shards_config(self) -> tuple[str, dict]: - return self.get_config(self.get_shard_config_path()) - - def get_shards(self) -> list[Shard]: - shards = self.config.get("storage:shard") - - if not shards: - raise RuntimeError(f"Cannot get shards information for {self.name} on {self.host.config.address}") - - if "default" in shards: - shards.pop("default") - return [Shard.from_object(shard) for shard in shards.values()] - def get_control_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT) def get_un_locode(self): return self._get_attribute(ConfigAttributes.UN_LOCODE) - def get_data_directory(self) -> str: - return self.host.get_data_directory(self.name) - def delete_blobovnicza(self): self.host.delete_blobovnicza(self.name) def delete_fstree(self): self.host.delete_fstree(self.name) - def delete_file(self, file_path: str) -> None: - self.host.delete_file(file_path) - - def is_file_exist(self, file_path) -> bool: - return self.host.is_file_exist(file_path) + def delete_pilorama(self): + self.host.delete_pilorama(self.name) def delete_metabase(self): self.host.delete_metabase(self.name) diff --git a/src/frostfs_testlib/storage/dataclasses/metrics.py b/src/frostfs_testlib/storage/dataclasses/metrics.py deleted file mode 100644 index 81e757c..0000000 --- a/src/frostfs_testlib/storage/dataclasses/metrics.py +++ /dev/null @@ -1,36 +0,0 @@ -from frostfs_testlib.hosting import Host -from frostfs_testlib.shell.interfaces import CommandResult - - -class Metrics: - def __init__(self, host: Host, metrics_endpoint: str) -> None: - self.storage = StorageMetrics(host, metrics_endpoint) - - - -class StorageMetrics: - """ - Class represents storage metrics in a cluster - """ - def __init__(self, host: Host, metrics_endpoint: str) -> None: - self.host = host - self.metrics_endpoint = metrics_endpoint - - def get_metrics_search_by_greps(self, **greps) -> CommandResult: - """ - Get a metrics, search by: cid, metric_type, shard_id etc. 
- Args: - greps: dict of grep-command-name and value - for example get_metrics_search_by_greps(command='container_objects_total', cid='123456') - Return: - result of metrics - """ - shell = self.host.get_shell() - additional_greps = " |grep ".join([grep_command for grep_command in greps.values()]) - result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}") - return result - - def get_all_metrics(self) -> CommandResult: - shell = self.host.get_shell() - result = shell.exec(f"curl -s {self.metrics_endpoint}") - return result diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 5c8b723..8fcb03b 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -1,23 +1,17 @@ -from abc import abstractmethod +from abc import ABC, abstractmethod from dataclasses import dataclass -from datetime import datetime, timezone -from typing import Optional, TypedDict, TypeVar +from typing import Optional, Tuple, TypedDict, TypeVar import yaml -from dateutil import parser -from frostfs_testlib import reporter from frostfs_testlib.hosting.config import ServiceConfig from frostfs_testlib.hosting.interfaces import Host -from frostfs_testlib.shell.interfaces import CommandResult -from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration, ServiceConfigurationYml from frostfs_testlib.storage.constants import ConfigAttributes -from frostfs_testlib.testing.readable import HumanReadableABC from frostfs_testlib.utils import wallet_utils @dataclass -class NodeBase(HumanReadableABC): +class NodeBase(ABC): """ Represents a node of some underlying service """ @@ -25,7 +19,6 @@ class NodeBase(HumanReadableABC): id: str name: str host: Host - _process_name: str def __init__(self, id, name, host) -> None: self.id = id @@ -55,47 +48,18 @@ class NodeBase(HumanReadableABC): def get_service_systemctl_name(self) -> str: return self._get_attribute(ConfigAttributes.SERVICE_NAME) - def get_process_name(self) -> str: - return self._process_name - def start_service(self): - with reporter.step(f"Unmask {self.name} service on {self.host.config.address}"): - self.host.unmask_service(self.name) - - with reporter.step(f"Start {self.name} service on {self.host.config.address}"): - self.host.start_service(self.name) - - def send_signal_to_service(self, signal: str): - with reporter.step(f"Send -{signal} signal to {self.name} service on {self.host.config.address}"): - self.host.send_signal_to_service(self.name, signal) + self.host.start_service(self.name) @abstractmethod def service_healthcheck(self) -> bool: """Service healthcheck.""" - # TODO: Migrate to sub-class Metrcis (not yet exists :)) - def get_metric(self, metric: str) -> CommandResult: - shell = self.host.get_shell() - result = shell.exec(f"curl -s {self.get_metrics_endpoint()} | grep -e '^{metric}'") - return result - - def get_metrics_endpoint(self) -> str: - return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS) - - def get_pprof_endpoint(self) -> str: - return self._get_attribute(ConfigAttributes.ENDPOINT_PPROF) - - def stop_service(self, mask: bool = True): - if mask: - with reporter.step(f"Mask {self.name} service on {self.host.config.address}"): - self.host.mask_service(self.name) - - with reporter.step(f"Stop {self.name} service on {self.host.config.address}"): - self.host.stop_service(self.name) + def stop_service(self): + self.host.stop_service(self.name) def 
restart_service(self): - with reporter.step(f"Restart {self.name} service on {self.host.config.address}"): - self.host.restart_service(self.name) + self.host.restart_service(self.name) def get_wallet_password(self) -> str: return self._get_attribute(ConfigAttributes.WALLET_PASSWORD) @@ -122,55 +86,14 @@ class NodeBase(HumanReadableABC): ConfigAttributes.CONFIG_PATH, ) - def get_remote_wallet_config_path(self) -> str: - """ - Returns node config file path located on remote host - """ - return self._get_attribute( - ConfigAttributes.REMOTE_WALLET_CONFIG, - ) - def get_wallet_config_path(self) -> str: return self._get_attribute( ConfigAttributes.LOCAL_WALLET_CONFIG, ConfigAttributes.WALLET_CONFIG, ) - def get_logger_config_path(self) -> str: - """ - Returns config path for logger located on remote host - """ - config_attributes = self.host.get_service_config(self.name) - return ( - self._get_attribute(ConfigAttributes.LOGGER_CONFIG_PATH) - if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes - else None - ) - - def get_working_dir_path(self) -> Optional[str]: - """ - Returns working directory path located on remote host - """ - config_attributes = self.host.get_service_config(self.name) - return self._get_attribute(ConfigAttributes.WORKING_DIR) if ConfigAttributes.WORKING_DIR in config_attributes.attributes else None - - @property - def config_dir(self) -> str: - return self._get_attribute(ConfigAttributes.CONFIG_DIR) - - @property - def main_config_path(self) -> str: - return self._get_attribute(ConfigAttributes.CONFIG_PATH) - - @property - def config(self) -> ServiceConfigurationYml: - return ServiceConfiguration(self.name, self.host.get_shell(), self.config_dir, self.main_config_path) - - # TODO: Deprecated. Use config with ServiceConfigurationYml interface - def get_config(self, config_file_path: Optional[str] = None) -> tuple[str, dict]: - if config_file_path is None: - config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) - + def get_config(self) -> Tuple[str, dict]: + config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) shell = self.host.get_shell() result = shell.exec(f"cat {config_file_path}") @@ -179,11 +102,8 @@ class NodeBase(HumanReadableABC): config = yaml.safe_load(config_text) return config_file_path, config - # TODO: Deprecated. 
Use config with ServiceConfigurationYml interface - def save_config(self, new_config: dict, config_file_path: Optional[str] = None) -> None: - if config_file_path is None: - config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) - + def save_config(self, new_config: dict) -> None: + config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) shell = self.host.get_shell() config_str = yaml.dump(new_config) @@ -194,12 +114,16 @@ class NodeBase(HumanReadableABC): storage_wallet_pass = self.get_wallet_password() return wallet_utils.get_wallet_public_key(storage_wallet_path, storage_wallet_pass) - def _get_attribute(self, attribute_name: str, default_attribute_name: Optional[str] = None) -> str: + def _get_attribute( + self, attribute_name: str, default_attribute_name: Optional[str] = None + ) -> str: config = self.host.get_service_config(self.name) if attribute_name not in config.attributes: if default_attribute_name is None: - raise RuntimeError(f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either") + raise RuntimeError( + f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either" + ) return config.attributes[default_attribute_name] @@ -208,13 +132,6 @@ class NodeBase(HumanReadableABC): def _get_service_config(self) -> ServiceConfig: return self.host.get_service_config(self.name) - def get_service_uptime(self, service: str) -> datetime: - result = self.host.get_shell().exec(f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2") - start_time = parser.parse(result.stdout.strip()) - current_time = datetime.now(tz=timezone.utc) - active_time = current_time - start_time - return active_time - ServiceClass = TypeVar("ServiceClass", bound=NodeBase) diff --git a/src/frostfs_testlib/storage/dataclasses/object_size.py b/src/frostfs_testlib/storage/dataclasses/object_size.py deleted file mode 100644 index 0429c78..0000000 --- a/src/frostfs_testlib/storage/dataclasses/object_size.py +++ /dev/null @@ -1,13 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class ObjectSize: - name: str - value: int - - def __str__(self) -> str: - return self.name - - def __repr__(self) -> str: - return self.__str__() diff --git a/src/frostfs_testlib/storage/dataclasses/policy.py b/src/frostfs_testlib/storage/dataclasses/policy.py deleted file mode 100644 index 872ee05..0000000 --- a/src/frostfs_testlib/storage/dataclasses/policy.py +++ /dev/null @@ -1,13 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class PlacementPolicy: - name: str - value: str - - def __str__(self) -> str: - return self.name - - def __repr__(self) -> str: - return self.__str__() diff --git a/src/frostfs_testlib/storage/dataclasses/shard.py b/src/frostfs_testlib/storage/dataclasses/shard.py deleted file mode 100644 index bebdbf5..0000000 --- a/src/frostfs_testlib/storage/dataclasses/shard.py +++ /dev/null @@ -1,92 +0,0 @@ -from dataclasses import dataclass - -from configobj import ConfigObj - -SHARD_PREFIX = "FROSTFS_STORAGE_SHARD_" -BLOBSTOR_PREFIX = "_BLOBSTOR_" - - -@dataclass -class Blobstor: - path: str - path_type: str - - def __eq__(self, other) -> bool: - if not isinstance(other, self.__class__): - raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared") - return self.path == other.path and self.path_type == other.path_type - - def __hash__(self): - return hash((self.path, self.path_type)) - - @staticmethod - def from_config_object(section: ConfigObj, shard_id: str, 
blobstor_id: str): - var_prefix = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}{blobstor_id}" - return Blobstor(section.get(f"{var_prefix}_PATH"), section.get(f"{var_prefix}_TYPE")) - - -@dataclass -class Shard: - blobstor: list[Blobstor] - metabase: str - writecache: str - pilorama: str - - def __eq__(self, other) -> bool: - if not isinstance(other, self.__class__): - raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared") - return ( - set(self.blobstor) == set(other.blobstor) - and self.metabase == other.metabase - and self.writecache == other.writecache - and self.pilorama == other.pilorama - ) - - def __hash__(self): - return hash((self.metabase, self.writecache)) - - @staticmethod - def _get_blobstor_count_from_section(config_object: ConfigObj, shard_id: int): - pattern = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}" - blobstors = {key[: len(pattern) + 2] for key in config_object.keys() if pattern in key} - return len(blobstors) - - @staticmethod - def from_config_object(config_object: ConfigObj, shard_id: int): - var_prefix = f"{SHARD_PREFIX}{shard_id}" - - blobstor_count = Shard._get_blobstor_count_from_section(config_object, shard_id) - blobstors = [Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count)] - - write_cache_enabled = config_object.as_bool(f"{var_prefix}_WRITECACHE_ENABLED") - - return Shard( - blobstors, - config_object.get(f"{var_prefix}_METABASE_PATH"), - config_object.get(f"{var_prefix}_WRITECACHE_PATH") if write_cache_enabled else "", - ) - - @staticmethod - def from_object(shard): - metabase = shard["metabase"]["path"] if "path" in shard["metabase"] else shard["metabase"] - writecache_enabled = True - if "enabled" in shard["writecache"]: - writecache_enabled = shard["writecache"]["enabled"] - - writecache = shard["writecache"]["path"] if "path" in shard["writecache"] else shard["writecache"] - if not writecache_enabled: - writecache = "" - - # Currently due to issue we need to check if pilorama exists in keys - # TODO: make pilorama mandatory after fix - if shard.get("pilorama"): - pilorama = shard["pilorama"]["path"] if "path" in shard["pilorama"] else shard["pilorama"] - else: - pilorama = None - - return Shard( - blobstor=[Blobstor(path=blobstor["path"], path_type=blobstor["type"]) for blobstor in shard["blobstor"]], - metabase=metabase, - writecache=writecache, - pilorama=pilorama, - ) diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index 4c303fc..dd46740 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -1,12 +1,6 @@ -import re from dataclasses import dataclass from typing import Optional -from pydantic import BaseModel, Field, field_validator - -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.testing.readable import HumanReadableEnum - @dataclass class ObjectRef: @@ -23,105 +17,9 @@ class LockObjectInfo(ObjectRef): @dataclass class StorageObjectInfo(ObjectRef): size: Optional[int] = None - wallet: Optional[WalletInfo] = None + wallet_file_path: Optional[str] = None file_path: Optional[str] = None file_hash: Optional[str] = None attributes: Optional[list[dict[str, str]]] = None tombstone: Optional[str] = None locks: Optional[list[LockObjectInfo]] = None - - -class NodeStatus(HumanReadableEnum): - MAINTENANCE: str = "maintenance" - ONLINE: str = "online" - 
OFFLINE: str = "offline" - - -@dataclass -class NodeNetmapInfo: - node_id: str = None - node_status: NodeStatus = None - node_data_ips: list[str] = None - cluster_name: str = None - continent: str = None - country: str = None - country_code: str = None - external_address: list[str] = None - location: str = None - node: str = None - price: int = None - sub_div: str = None - sub_div_code: int = None - un_locode: str = None - role: str = None - - -class Interfaces(HumanReadableEnum): - DATA_O: str = "data0" - DATA_1: str = "data1" - MGMT: str = "mgmt" - INTERNAL_0: str = "internal0" - INTERNAL_1: str = "internal1" - - -@dataclass -class NodeNetInfo: - epoch: str = None - network_magic: str = None - time_per_block: str = None - container_fee: str = None - epoch_duration: str = None - inner_ring_candidate_fee: str = None - maximum_object_size: str = None - maximum_count_of_data_shards: str = None - maximum_count_of_parity_shards: str = None - withdrawal_fee: str = None - homomorphic_hashing_disabled: str = None - maintenance_mode_allowed: str = None - - -class Attributes(BaseModel): - cluster_name: str = Field(alias="ClusterName") - continent: str = Field(alias="Continent") - country: str = Field(alias="Country") - country_code: str = Field(alias="CountryCode") - external_addr: list[str] = Field(alias="ExternalAddr") - location: str = Field(alias="Location") - node: str = Field(alias="Node") - subdiv: str = Field(alias="SubDiv") - subdiv_code: str = Field(alias="SubDivCode") - un_locode: str = Field(alias="UN-LOCODE") - role: str = Field(alias="role") - - @field_validator("external_addr", mode="before") - @classmethod - def convert_external_addr(cls, value: str) -> list[str]: - return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", value)] - - -class NodeInfo(BaseModel): - public_key: str = Field(alias="publicKey") - addresses: list[str] = Field(alias="addresses") - state: str = Field(alias="state") - attributes: Attributes = Field(alias="attributes") - - @field_validator("addresses", mode="before") - @classmethod - def convert_external_addr(cls, value: str) -> list[str]: - return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", ",".join(value))] - - -@dataclass -class Chunk: - def __init__(self, object_id: str, required_nodes: list, confirmed_nodes: list, ec_parent_object_id: str, ec_index: int) -> None: - self.object_id = object_id - self.required_nodes = required_nodes - self.confirmed_nodes = confirmed_nodes - self.ec_parent_object_id = ec_parent_object_id - self.ec_index = ec_index - - def __str__(self) -> str: - return self.object_id - - def __repr__(self) -> str: - return self.object_id diff --git a/src/frostfs_testlib/storage/dataclasses/wallet.py b/src/frostfs_testlib/storage/dataclasses/wallet.py index d053d29..1d66c4b 100644 --- a/src/frostfs_testlib/storage/dataclasses/wallet.py +++ b/src/frostfs_testlib/storage/dataclasses/wallet.py @@ -1,15 +1,13 @@ import json import logging import os +import uuid from dataclasses import dataclass from typing import Optional -import yaml - -from frostfs_testlib import reporter -from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS +from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.cluster import NodeBase +from frostfs_testlib.storage.cluster import Cluster, NodeBase from frostfs_testlib.utils.wallet_utils import 
get_last_address_from_wallet, init_wallet logger = logging.getLogger("frostfs.testlib.utils") @@ -23,13 +21,9 @@ class WalletInfo: @staticmethod def from_node(node: NodeBase): - wallet_path = node.get_wallet_path() - wallet_password = node.get_wallet_password() - wallet_config_file = os.path.join(ASSETS_DIR, os.path.basename(node.get_wallet_config_path())) - with open(wallet_config_file, "w") as file: - file.write(yaml.dump({"wallet": wallet_path, "password": wallet_password})) - - return WalletInfo(wallet_path, wallet_password, wallet_config_file) + return WalletInfo( + node.get_wallet_path(), node.get_wallet_password(), node.get_wallet_config_path() + ) def get_address(self) -> str: """ @@ -53,17 +47,22 @@ class WalletInfo: """ with open(self.path, "r") as wallet: wallet_json = json.load(wallet) - assert abs(account_id) + 1 <= len(wallet_json["accounts"]), f"There is no index '{account_id}' in wallet: {wallet_json}" + assert abs(account_id) + 1 <= len( + wallet_json["accounts"] + ), f"There is no index '{account_id}' in wallet: {wallet_json}" return wallet_json["accounts"][account_id]["address"] class WalletFactory: - def __init__(self, wallets_dir: str, shell: Shell) -> None: + def __init__(self, wallets_dir: str, shell: Shell, cluster: Cluster) -> None: self.shell = shell self.wallets_dir = wallets_dir + self.cluster = cluster - def create_wallet(self, file_name: str, password: Optional[str] = None) -> WalletInfo: + def create_wallet( + self, file_name: Optional[str] = None, password: Optional[str] = None + ) -> WalletInfo: """ Creates new default wallet. @@ -75,6 +74,8 @@ class WalletFactory: WalletInfo object of new wallet. """ + if file_name is None: + file_name = str(uuid.uuid4()) if password is None: password = "" @@ -84,8 +85,6 @@ class WalletFactory: init_wallet(wallet_path, password) with open(wallet_config_path, "w") as config_file: - config_file.write(f'wallet: {wallet_path}\npassword: "{password}"') - - reporter.attach(wallet_path, os.path.basename(wallet_path)) + config_file.write(f'password: "{password}"') return WalletInfo(wallet_path, password, wallet_config_path) diff --git a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py deleted file mode 100644 index c1e3a31..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py +++ /dev/null @@ -1,14 +0,0 @@ -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.storage.grpc_operations import implementations, interfaces, interfaces_wrapper - - -class CliClientWrapper(interfaces_wrapper.GrpcClientWrapper): - def __init__(self, cli: FrostfsCli) -> None: - self.cli = cli - self.object: interfaces.ObjectInterface = implementations.ObjectOperations(self.cli) - self.container: interfaces.ContainerInterface = implementations.ContainerOperations(self.cli) - self.netmap: interfaces.NetmapInterface = implementations.NetmapOperations(self.cli) - - -class RpcClientWrapper(interfaces_wrapper.GrpcClientWrapper): - pass # The next series diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py deleted file mode 100644 index 18e8ae5..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .chunks import ChunksOperations -from .container import ContainerOperations -from .netmap import NetmapOperations -from .object import ObjectOperations diff --git 
a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py deleted file mode 100644 index 0d787e2..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py +++ /dev/null @@ -1,165 +0,0 @@ -import json -from typing import Optional - -from frostfs_testlib import reporter -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher -from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, Interfaces, NodeNetmapInfo -from frostfs_testlib.storage.grpc_operations import interfaces -from frostfs_testlib.testing.test_control import wait_for_success -from frostfs_testlib.utils.cli_utils import parse_netmap_output - - -class ChunksOperations(interfaces.ChunksInterface): - def __init__(self, cli: FrostfsCli) -> None: - self.cli = cli - - @reporter.step("Search node without chunks") - def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: - if not endpoint: - endpoint = cluster.default_rpc_endpoint - netmap = parse_netmap_output(self.cli.netmap.snapshot(endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout) - chunks_node_key = [] - for chunk in chunks: - chunks_node_key.extend(chunk.confirmed_nodes) - for node_info in netmap.copy(): - if node_info.node_id in chunks_node_key and node_info in netmap: - netmap.remove(node_info) - result = [] - for node_info in netmap: - for cluster_node in cluster.cluster_nodes: - if node_info.node == cluster_node.get_interface(Interfaces.MGMT): - result.append(cluster_node) - return result - - @reporter.step("Search node with chunk {chunk}") - def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: - netmap = parse_netmap_output(self.cli.netmap.snapshot(cluster.default_rpc_endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout) - for node_info in netmap: - if node_info.node_id in chunk.confirmed_nodes: - for cluster_node in cluster.cluster_nodes: - if cluster_node.get_interface(Interfaces.MGMT) == node_info.node: - return (cluster_node, node_info) - - @wait_for_success(300, 5, fail_testcase=None) - @reporter.step("Search shard with chunk {chunk}") - def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: - oid_path = f"{chunk.object_id[0]}/{chunk.object_id[1]}/{chunk.object_id[2]}/{chunk.object_id[3]}" - node_shell = node.storage_node.host.get_shell() - shards_watcher = ShardsWatcher(node) - - with reporter.step("Search object file"): - for shard_id, shard_info in shards_watcher.shards_snapshots[-1].items(): - check_dir = node_shell.exec(f" [ -d {shard_info['blobstor'][1]['path']}/{oid_path} ] && echo 1 || echo 0").stdout - if "1" in check_dir.strip(): - return shard_id - - @reporter.step("Get all chunks") - def get_all( - self, - rpc_endpoint: str, - cid: str, - oid: str, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = True, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> list[Chunk]: - object_nodes = self.cli.object.nodes( - rpc_endpoint=rpc_endpoint, - cid=cid, - address=address, - bearer=bearer, - generate_key=generate_key, - oid=oid, - 
trace=trace, - root=root, - verify_presence_all=verify_presence_all, - json=json, - ttl=ttl, - xhdr=xhdr, - timeout=timeout, - ) - return self._parse_object_nodes(object_nodes.stdout.split("\n")[0]) - - @reporter.step("Get last parity chunk") - def get_parity( - self, - rpc_endpoint: str, - cid: str, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = True, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> Chunk: - object_nodes = self.cli.object.nodes( - rpc_endpoint=rpc_endpoint, - cid=cid, - address=address, - bearer=bearer, - generate_key=generate_key, - oid=oid, - trace=trace, - root=root, - verify_presence_all=verify_presence_all, - json=json, - ttl=ttl, - xhdr=xhdr, - timeout=timeout, - ) - return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[-1] - - @reporter.step("Get first data chunk") - def get_first_data( - self, - rpc_endpoint: str, - cid: str, - oid: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = True, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> Chunk: - object_nodes = self.cli.object.nodes( - rpc_endpoint=rpc_endpoint, - cid=cid, - address=address, - bearer=bearer, - generate_key=generate_key, - oid=oid, - trace=trace, - root=root, - verify_presence_all=verify_presence_all, - json=json, - ttl=ttl, - xhdr=xhdr, - timeout=timeout, - ) - return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0] - - def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]: - parse_result = json.loads(object_nodes) - if parse_result.get("errors"): - raise RuntimeError(", ".join(parse_result["errors"])) - return [Chunk(**chunk) for chunk in parse_result["data_objects"]] diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py deleted file mode 100644 index 75af00c..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py +++ /dev/null @@ -1,327 +0,0 @@ -import json -import logging -import re -from typing import List, Optional, Union - -from frostfs_testlib import reporter -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.clients.s3 import BucketContainerResolver -from frostfs_testlib.plugins import load_plugin -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.grpc_operations import interfaces -from frostfs_testlib.utils import json_utils - -logger = logging.getLogger("NeoLogger") - - -class ContainerOperations(interfaces.ContainerInterface): - def __init__(self, cli: FrostfsCli) -> None: - self.cli = cli - - @reporter.step("Create Container") - def create( - self, - endpoint: str, - nns_zone: Optional[str] = None, - nns_name: Optional[str] = None, - address: Optional[str] = None, - attributes: Optional[dict] = None, - basic_acl: Optional[str] = None, - await_mode: bool = False, - disable_timestamp: bool = False, - force: bool = False, - trace: bool = False, - name: Optional[str] = None, - nonce: Optional[str] = None, - policy: 
Optional[str] = None, - session: Optional[str] = None, - subnet: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> str: - """ - A wrapper for `frostfs-cli container create` call. - - Args: - wallet (WalletInfo): a wallet on whose behalf a container is created - rule (optional, str): placement rule for container - basic_acl (optional, str): an ACL for container, will be - appended to `--basic-acl` key - attributes (optional, dict): container attributes , will be - appended to `--attributes` key - session_token (optional, str): a path to session token file - session_wallet(optional, str): a path to the wallet which signed - the session token; this parameter makes sense - when paired with `session_token` - shell: executor for cli command - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - options (optional, dict): any other options to pass to the call - name (optional, str): container name attribute - await_mode (bool): block execution until container is persisted - wait_for_creation (): Wait for container shows in container list - timeout: Timeout for the operation. - - Returns: - (str): CID of the created container - """ - result = self.cli.container.create( - rpc_endpoint=endpoint, - policy=policy, - nns_zone=nns_zone, - nns_name=nns_name, - address=address, - attributes=attributes, - basic_acl=basic_acl, - await_mode=await_mode, - disable_timestamp=disable_timestamp, - force=force, - trace=trace, - name=name, - nonce=nonce, - session=session, - subnet=subnet, - ttl=ttl, - xhdr=xhdr, - timeout=timeout, - ) - - cid = self._parse_cid(result.stdout) - - logger.info("Container created; waiting until it is persisted in the sidechain") - - return cid - - @reporter.step("List Containers") - def list( - self, - endpoint: str, - name: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - owner: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - **params, - ) -> List[str]: - """ - A wrapper for `frostfs-cli container list` call. It returns all the - available containers for the given wallet. - Args: - shell: executor for cli command - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - timeout: Timeout for the operation. - Returns: - (list): list of containers - """ - result = self.cli.container.list( - rpc_endpoint=endpoint, - name=name, - address=address, - generate_key=generate_key, - owner=owner, - ttl=ttl, - xhdr=xhdr, - timeout=timeout, - **params, - ) - return result.stdout.split() - - @reporter.step("List Objects in container") - def list_objects( - self, - endpoint: str, - cid: str, - bearer: Optional[str] = None, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> List[str]: - """ - A wrapper for `frostfs-cli container list-objects` call. It returns all the - available objects in container. - Args: - container_id: cid of container - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - timeout: Timeout for the operation. 
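
Aside: the container wrappers above take `attributes` as a Python dict, while the object `put` docstring later in this diff describes the CLI form as `Key1=Value1,Key2=Value2`. How the wrapper serializes the dict is not shown here, so the helper below is only a plausible, self-contained sketch of that conversion (the function name is invented).

def render_attributes(attributes: dict[str, str]) -> str:
    """Render a dict into the Key1=Value1,Key2=Value2 form the CLI flag expects."""
    return ",".join(f"{key}={value}" for key, value in attributes.items())


if __name__ == "__main__":
    # Example: attributes for a hypothetical test container.
    print(render_attributes({"Name": "test-container", "Team": "qa"}))
    # -> Name=test-container,Team=qa
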
- Returns: - (list): list of containers - """ - result = self.cli.container.list_objects( - rpc_endpoint=endpoint, - cid=cid, - bearer=bearer, - wallet=wallet, - address=address, - generate_key=generate_key, - trace=trace, - ttl=ttl, - xhdr=xhdr, - timeout=timeout, - ) - logger.info(f"Container objects: \n{result}") - return result.stdout.split() - - @reporter.step("Delete container") - def delete( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - await_mode: bool = False, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - force: bool = False, - trace: bool = False, - ): - return self.cli.container.delete( - rpc_endpoint=endpoint, - cid=cid, - address=address, - await_mode=await_mode, - session=session, - ttl=ttl, - xhdr=xhdr, - force=force, - trace=trace, - ).stdout - - @reporter.step("Get container") - def get( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - to: Optional[str] = None, - json_mode: bool = True, - trace: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> Union[dict, str]: - result = self.cli.container.get( - rpc_endpoint=endpoint, - cid=cid, - address=address, - generate_key=generate_key, - await_mode=await_mode, - to=to, - json_mode=json_mode, - trace=trace, - ttl=ttl, - xhdr=xhdr, - timeout=timeout, - ) - container_info = json.loads(result.stdout) - attributes = dict() - for attr in container_info["attributes"]: - attributes[attr["key"]] = attr["value"] - container_info["attributes"] = attributes - container_info["ownerID"] = json_utils.json_reencode(container_info["ownerID"]["value"]) - return container_info - - @reporter.step("Get eacl container") - def get_eacl( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - json_mode: bool = True, - trace: bool = False, - to: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ): - return self.cli.container.get_eacl( - rpc_endpoint=endpoint, - cid=cid, - address=address, - generate_key=generate_key, - await_mode=await_mode, - to=to, - session=session, - ttl=ttl, - xhdr=xhdr, - timeout=CLI_DEFAULT_TIMEOUT, - ).stdout - - @reporter.step("Get nodes container") - def nodes( - self, - endpoint: str, - cid: str, - cluster: Cluster, - address: Optional[str] = None, - ttl: Optional[int] = None, - from_file: Optional[str] = None, - trace: bool = False, - short: Optional[bool] = True, - xhdr: Optional[dict] = None, - generate_key: Optional[bool] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> List[ClusterNode]: - result = self.cli.container.search_node( - rpc_endpoint=endpoint, - cid=cid, - address=address, - ttl=ttl, - from_file=from_file, - trace=trace, - short=short, - xhdr=xhdr, - generate_key=generate_key, - timeout=timeout, - ).stdout - - pattern = r"[0-9]+(?:\.[0-9]+){3}" - nodes_ip = list(set(re.findall(pattern, result))) - - with reporter.step(f"nodes ips = {nodes_ip}"): - nodes_list = cluster.get_nodes_by_ip(nodes_ip) - - with reporter.step(f"Return nodes - {nodes_list}"): - return nodes_list - - @reporter.step("Resolve container by name") - def resolve_container_by_name(name: str, node: ClusterNode): - resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", 
node.host.config.product) - resolver: BucketContainerResolver = resolver_cls() - return resolver.resolve(node, name) - - def _parse_cid(self, output: str) -> str: - """ - Parses container ID from a given CLI output. The input string we expect: - container ID: 2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN - awaiting... - container has been persisted on sidechain - We want to take 'container ID' value from the string. - - Args: - output (str): CLI output to parse - - Returns: - (str): extracted CID - """ - try: - # taking first line from command's output - first_line = output.split("\n")[0] - except Exception: - first_line = "" - logger.error(f"Got empty output: {output}") - splitted = first_line.split(": ") - if len(splitted) != 2: - raise ValueError(f"no CID was parsed from command output: \t{first_line}") - return splitted[1] diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py deleted file mode 100644 index 905171b..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py +++ /dev/null @@ -1,171 +0,0 @@ -import json as module_json -from typing import List, Optional - -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.cli.netmap_parser import NetmapParser -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo - -from .. import interfaces - - -class NetmapOperations(interfaces.NetmapInterface): - def __init__(self, cli: FrostfsCli) -> None: - self.cli = cli - - def epoch( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> int: - """ - Get current epoch number. - """ - output = ( - self.cli.netmap.epoch( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return int(output) - - def netinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> NodeNetInfo: - """ - Get target node info. - """ - output = ( - self.cli.netmap.netinfo( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return NetmapParser.netinfo(output) - - def nodeinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - json: bool = True, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> NodeNetmapInfo: - """ - Get target node info. 
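
Aside: the `_parse_cid` helper above documents the exact output it expects from `frostfs-cli container create` (a first line of the form `container ID: <CID>` followed by status lines). A standalone sketch of the same parsing logic, assuming only that output shape:

def parse_cid(output: str) -> str:
    """Extract the CID from a 'container ID: <CID>' first line of CLI output."""
    first_line = output.split("\n")[0] if output else ""
    parts = first_line.split(": ")
    if len(parts) != 2:
        raise ValueError(f"no CID was parsed from command output: {first_line}")
    return parts[1]


sample = (
    "container ID: 2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN\n"
    "awaiting...\n"
    "container has been persisted on sidechain"
)
assert parse_cid(sample) == "2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN"
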
- """ - output = ( - self.cli.netmap.nodeinfo( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - json=json, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return NetmapParser.node_info(module_json.loads(output)) - - def snapshot( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> List[NodeNetmapInfo]: - """ - Get target node info. - """ - output = ( - self.cli.netmap.snapshot( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return NetmapParser.snapshot_all_nodes(output) - - def snapshot_one_node( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = True, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> List[NodeNetmapInfo]: - """ - Get target one node info. - """ - output = ( - self.cli.netmap.snapshot( - rpc_endpoint=rpc_endpoint, - wallet=wallet, - address=address, - generate_key=generate_key, - ttl=ttl, - trace=trace, - xhdr=xhdr, - timeout=timeout, - ) - .stdout.split("Trace ID")[0] - .strip() - ) - - return NetmapParser.snapshot_one_node(output, rpc_endpoint) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py deleted file mode 100644 index be8a470..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py +++ /dev/null @@ -1,708 +0,0 @@ -import json -import logging -import os -import re -import uuid -from typing import Any, Optional - -from frostfs_testlib import reporter, utils -from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.resources.common import ASSETS_DIR -from frostfs_testlib.shell.interfaces import CommandResult -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces -from frostfs_testlib.storage.grpc_operations import interfaces -from frostfs_testlib.storage.grpc_operations.implementations.chunks import ChunksOperations -from frostfs_testlib.testing.test_control import wait_for_success -from frostfs_testlib.utils import cli_utils, file_utils - -logger = logging.getLogger("NeoLogger") - - -class ObjectOperations(interfaces.ObjectInterface): - def __init__(self, cli: FrostfsCli) -> None: - self.cli = cli - self.chunks: interfaces.ChunksInterface = ChunksOperations(self.cli) - - @reporter.step("Delete object") - def delete( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> str: - """ - DELETE an Object. 
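
Aside: every netmap wrapper above post-processes stdout the same way, keeping only what precedes the first `Trace ID` marker before parsing, since tracing appends that block to the output. A minimal sketch of the pattern with the epoch case; the sample output layout is an assumption, only the split-and-strip step mirrors the code above.

def strip_trace(stdout: str) -> str:
    """Drop the trailing 'Trace ID ...' block printed when tracing is enabled."""
    return stdout.split("Trace ID")[0].strip()


def parse_epoch(stdout: str) -> int:
    """Parse the epoch number from `frostfs-cli netmap epoch` output."""
    return int(strip_trace(stdout))


# Hypothetical stdout produced with --trace enabled.
assert parse_epoch("42\nTrace ID: abcdef") == 42
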
- - Args: - cid: ID of Container where we get the Object from - oid: ID of Object we are going to delete - bearer: path to Bearer Token file, appends to `--bearer` key - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - xhdr: Request X-Headers in form of Key=Value - session: path to a JSON-encoded container session token - timeout: Timeout for the operation. - Returns: - (str): Tombstone ID - """ - result = self.cli.object.delete( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - bearer=bearer, - xhdr=xhdr, - session=session, - timeout=timeout, - ) - - id_str = result.stdout.split("\n")[1] - tombstone = id_str.split(":")[1] - return tombstone.strip() - - @reporter.step("Get object") - def get( - self, - cid: str, - oid: str, - endpoint: str, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> file_utils.TestFile: - """ - GET from FrostFS. - - Args: - cid (str): ID of Container where we get the Object from - oid (str): Object ID - bearer: path to Bearer Token file, appends to `--bearer` key - write_object: path to downloaded file, appends to `--file` key - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - no_progress(optional, bool): do not show progress bar - xhdr (optional, dict): Request X-Headers in form of Key=Value - session (optional, dict): path to a JSON-encoded container session token - timeout: Timeout for the operation. - Returns: - (str): path to downloaded file - """ - if not write_object: - write_object = str(uuid.uuid4()) - test_file = file_utils.TestFile(os.path.join(ASSETS_DIR, write_object)) - - self.cli.object.get( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - file=test_file, - bearer=bearer, - no_progress=no_progress, - xhdr=xhdr, - session=session, - timeout=timeout, - ) - - return test_file - - @reporter.step("Get object from random node") - def get_from_random_node( - self, - cid: str, - oid: str, - cluster: Cluster, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> str: - """ - GET from FrostFS random storage node - - Args: - cid: ID of Container where we get the Object from - oid: Object ID - cluster: cluster object - bearer (optional, str): path to Bearer Token file, appends to `--bearer` key - write_object (optional, str): path to downloaded file, appends to `--file` key - no_progress(optional, bool): do not show progress bar - xhdr (optional, dict): Request X-Headers in form of Key=Value - session (optional, dict): path to a JSON-encoded container session token - timeout: Timeout for the operation. 
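
Aside: the `delete` wrapper above pulls the tombstone ID from the second stdout line and takes the text after the first colon. A self-contained sketch of that parsing; the sample wording is invented, only the line/colon layout matters.

def parse_tombstone(stdout: str) -> str:
    """Take the second line of `object delete` output and return the value after the colon."""
    id_line = stdout.split("\n")[1]
    return id_line.split(":")[1].strip()


sample = "Object removed successfully.\n  ID: 6C5pyQ4xYkSvJpB7qjE2rW8tHn3mVdGcLfKaUzNwPoAi"
assert parse_tombstone(sample) == "6C5pyQ4xYkSvJpB7qjE2rW8tHn3mVdGcLfKaUzNwPoAi"
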
- Returns: - (str): path to downloaded file - """ - endpoint = cluster.get_random_storage_rpc_endpoint() - return self.get( - cid, - oid, - endpoint, - bearer, - write_object, - xhdr, - no_progress, - session, - timeout, - ) - - @reporter.step("Get hash object") - def hash( - self, - rpc_endpoint: str, - cid: str, - oid: str, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - range: Optional[str] = None, - salt: Optional[str] = None, - ttl: Optional[int] = None, - session: Optional[str] = None, - hash_type: Optional[str] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> str: - """ - Get object hash. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - generate_key: Generate new private key. - oid: Object ID. - range: Range to take hash from in the form offset1:length1,... - rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - salt: Salt in hex format. - ttl: TTL value in request meta header (default 2). - session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session. - hash_type: Hash type. Either 'sha256' or 'tz' (default "sha256"). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). - - Returns: - Command's result. - """ - result = self.cli.object.hash( - rpc_endpoint=rpc_endpoint, - cid=cid, - oid=oid, - address=address, - bearer=bearer, - generate_key=generate_key, - range=range, - salt=salt, - ttl=ttl, - xhdr=xhdr, - session=session, - hash_type=hash_type, - timeout=timeout, - ) - - if range: - # Cut off the range and return only hash - return result.stdout.split(":")[1].strip() - - return result.stdout - - @reporter.step("Head object") - def head( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - json_output: bool = True, - is_raw: bool = False, - is_direct: bool = False, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> CommandResult | Any: - """ - HEAD an Object. - - Args: - cid (str): ID of Container where we get the Object from - oid (str): ObjectID to HEAD - bearer (optional, str): path to Bearer Token file, appends to `--bearer` key - endpoint(optional, str): FrostFS endpoint to send request to - json_output(optional, bool): return response in JSON format or not; this flag - turns into `--json` key - is_raw(optional, bool): send "raw" request or not; this flag - turns into `--raw` key - is_direct(optional, bool): send request directly to the node or not; this flag - turns into `--ttl 1` key - xhdr (optional, dict): Request X-Headers in form of Key=Value - session (optional, dict): path to a JSON-encoded container session token - timeout: Timeout for the operation. - Returns: - depending on the `json_output` parameter value, the function returns - (dict): HEAD response in JSON format - or - (str): HEAD response as a plain text - """ - result = self.cli.object.head( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - bearer=bearer, - json_mode=json_output, - raw=is_raw, - ttl=1 if is_direct else None, - xhdr=xhdr, - session=session, - timeout=timeout, - ) - - if not json_output: - return result - - try: - decoded = json.loads(result.stdout) - except Exception as exc: - # If we failed to parse output as JSON, the cause might be - # the plain text string in the beginning of the output. 
- # Here we cut off first string and try to parse again. - logger.info(f"failed to parse output: {exc}") - logger.info("parsing output in another way") - fst_line_idx = result.stdout.find("\n") - decoded = json.loads(result.stdout[fst_line_idx:]) - - # if response - if "chunks" in decoded.keys(): - logger.info("decoding ec chunks") - return decoded["chunks"] - - # If response is Complex Object header, it has `splitId` key - if "splitId" in decoded.keys(): - logger.info("decoding split header") - return utils.json_utils.decode_split_header(decoded) - - # If response is Last or Linking Object header, - # it has `header` dictionary and non-null `split` dictionary - if "split" in decoded["header"].keys(): - if decoded["header"]["split"]: - logger.info("decoding linking object") - return utils.json_utils.decode_linking_object(decoded) - - if decoded["header"]["objectType"] == "STORAGE_GROUP": - logger.info("decoding storage group") - return utils.json_utils.decode_storage_group(decoded) - - if decoded["header"]["objectType"] == "TOMBSTONE": - logger.info("decoding tombstone") - return utils.json_utils.decode_tombstone(decoded) - - logger.info("decoding simple header") - return utils.json_utils.decode_simple_header(decoded) - - @reporter.step("Lock Object") - def lock( - self, - cid: str, - oid: str, - endpoint: str, - lifetime: Optional[int] = None, - expire_at: Optional[int] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> str: - """ - Locks object in container. - - Args: - address: Address of wallet account. - bearer: File with signed JSON or binary encoded bearer token. - cid: Container ID. - oid: Object ID. - lifetime: Lock lifetime. - expire_at: Lock expiration epoch. - shell: executor for cli command - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - session: Path to a JSON-encoded container session token. - ttl: TTL value in request meta header (default 2). - wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Dict with request X-Headers. - timeout: Timeout for the operation. - - Returns: - Lock object ID - """ - result = self.cli.object.lock( - rpc_endpoint=endpoint, - lifetime=lifetime, - expire_at=expire_at, - address=address, - cid=cid, - oid=oid, - bearer=bearer, - xhdr=xhdr, - session=session, - ttl=ttl, - timeout=timeout, - ) - - # Splitting CLI output to separate lines and taking the penultimate line - id_str = result.stdout.strip().split("\n")[0] - oid = id_str.split(":")[1] - return oid.strip() - - @reporter.step("Put object") - def put( - self, - path: str, - cid: str, - endpoint: str, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> str: - """ - PUT of given file. 
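
Aside: the comment in `head` above explains the fallback: the JSON body can be prefixed by a plain-text line, so a failed parse is retried on everything after the first newline. A compact sketch of just that fallback; the sample payloads are invented for illustration.

import json


def parse_head_output(stdout: str) -> dict:
    """Parse JSON output, tolerating a leading plain-text line before the JSON body."""
    try:
        return json.loads(stdout)
    except json.JSONDecodeError:
        # Cut off the first line and try again.
        first_newline = stdout.find("\n")
        return json.loads(stdout[first_newline:])


assert parse_head_output('{"header": {"objectType": "REGULAR"}}')["header"]["objectType"] == "REGULAR"
assert parse_head_output('ID: abc\n{"header": {"objectType": "TOMBSTONE"}}')["header"]["objectType"] == "TOMBSTONE"
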
- - Args: - path: path to file to be PUT - cid: ID of Container where we get the Object from - bearer: path to Bearer Token file, appends to `--bearer` key - copies_number: Number of copies of the object to store within the RPC call - attributes: User attributes in form of Key1=Value1,Key2=Value2 - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - no_progress: do not show progress bar - expire_at: Last epoch in the life of the object - xhdr: Request X-Headers in form of Key=Value - session: path to a JSON-encoded container session token - timeout: Timeout for the operation. - Returns: - (str): ID of uploaded Object - """ - result = self.cli.object.put( - rpc_endpoint=endpoint, - file=path, - cid=cid, - attributes=attributes, - bearer=bearer, - copies_number=copies_number, - expire_at=expire_at, - no_progress=no_progress, - xhdr=xhdr, - session=session, - timeout=timeout, - ) - - # Splitting CLI output to separate lines and taking the penultimate line - id_str = result.stdout.strip().split("\n")[-2] - oid = id_str.split(":")[1] - return oid.strip() - - @reporter.step("Patch object") - def patch( - self, - cid: str, - oid: str, - endpoint: str, - ranges: list[str] = None, - payloads: list[str] = None, - new_attrs: Optional[str] = None, - replace_attrs: bool = False, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - trace: bool = False, - ) -> str: - """ - PATCH an object. - - Args: - cid: ID of Container where we get the Object from - oid: Object ID - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - ranges: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2] - payloads: An array of file paths to be applied in each range - new_attrs: Attributes to be changed in the format "key1=value1,key2=value2" - replace_attrs: Replace all attributes completely with new ones specified in new_attrs - bearer: Path to Bearer Token file, appends to `--bearer` key - xhdr: Request X-Headers in form of Key=Value - session: Path to a JSON-encoded container session token - timeout: Timeout for the operation - trace: Generate trace ID and print it - Returns: - (str): ID of patched Object - """ - result = self.cli.object.patch( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - range=ranges, - payload=payloads, - new_attrs=new_attrs, - replace_attrs=replace_attrs, - bearer=bearer, - xhdr=xhdr, - session=session, - timeout=timeout, - trace=trace, - ) - return result.stdout.split(":")[1].strip() - - @reporter.step("Put object to random node") - def put_to_random_node( - self, - path: str, - cid: str, - cluster: Cluster, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> str: - """ - PUT of given file to a random storage node. 
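
Aside: `put` above extracts the new object ID from the penultimate stdout line, again splitting on the colon. The sketch below mirrors that logic; the sample output is only an assumption about the general shape (an `ID: <OID>` line followed by one more line), not the CLI's exact wording.

def parse_put_oid(stdout: str) -> str:
    """Return the object ID from the penultimate 'ID: <OID>' line of `object put` output."""
    id_line = stdout.strip().split("\n")[-2]
    return id_line.split(":")[1].strip()


sample = (
    "4096 bytes uploaded\n"
    "  ID: 8N7qyQ4xYkSvJpB7qjE2rW8tHn3mVdGcLfKaUzNwPoAi\n"
    "  CID: 2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN"
)
assert parse_put_oid(sample) == "8N7qyQ4xYkSvJpB7qjE2rW8tHn3mVdGcLfKaUzNwPoAi"
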
- - Args: - path: path to file to be PUT - cid: ID of Container where we get the Object from - cluster: cluster under test - bearer: path to Bearer Token file, appends to `--bearer` key - copies_number: Number of copies of the object to store within the RPC call - attributes: User attributes in form of Key1=Value1,Key2=Value2 - cluster: cluster under test - no_progress: do not show progress bar - expire_at: Last epoch in the life of the object - xhdr: Request X-Headers in form of Key=Value - session: path to a JSON-encoded container session token - timeout: Timeout for the operation. - Returns: - ID of uploaded Object - """ - endpoint = cluster.get_random_storage_rpc_endpoint() - return self.put( - path, - cid, - endpoint, - bearer, - copies_number, - attributes, - xhdr, - expire_at, - no_progress, - session, - timeout=timeout, - ) - - @reporter.step("Get Range") - def range( - self, - cid: str, - oid: str, - range_cut: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> tuple[file_utils.TestFile, bytes]: - """ - GETRANGE an Object. - - Args: - wallet: wallet on whose behalf GETRANGE is done - cid: ID of Container where we get the Object from - oid: ID of Object we are going to request - range_cut: range to take data from in the form offset:length - shell: executor for cli command - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - bearer: path to Bearer Token file, appends to `--bearer` key - xhdr: Request X-Headers in form of Key=Value - session: path to a JSON-encoded container session token - timeout: Timeout for the operation. - Returns: - (str, bytes) - path to the file with range content and content of this file as bytes - """ - test_file = file_utils.TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4()))) - - self.cli.object.range( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - range=range_cut, - file=test_file, - bearer=bearer, - xhdr=xhdr, - session=session, - timeout=timeout, - ) - - with open(test_file, "rb") as file: - content = file.read() - return test_file, content - - @reporter.step("Search object") - def search( - self, - cid: str, - endpoint: str, - bearer: str = "", - oid: Optional[str] = None, - filters: Optional[dict] = None, - expected_objects_list: Optional[list] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - phy: bool = False, - root: bool = False, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - ttl: Optional[int] = None, - ) -> list: - """ - SEARCH an Object. - - Args: - wallet: wallet on whose behalf SEARCH is done - cid: ID of Container where we get the Object from - shell: executor for cli command - bearer: path to Bearer Token file, appends to `--bearer` key - endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - filters: key=value pairs to filter Objects - expected_objects_list: a list of ObjectIDs to compare found Objects with - xhdr: Request X-Headers in form of Key=Value - session: path to a JSON-encoded container session token - phy: Search physically stored objects. - root: Search for user objects. - timeout: Timeout for the operation. 
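
Aside: the `range` wrapper takes `range_cut` in the documented `offset:length` form and hands back both the temp file path and its raw bytes, so the natural test check is that those bytes equal the matching slice of the original payload. A hedged sketch of that check, with an in-memory stand-in for the downloaded file.

def expected_slice(payload: bytes, range_cut: str) -> bytes:
    """Return the slice of the original payload described by 'offset:length'."""
    offset, length = (int(part) for part in range_cut.split(":"))
    return payload[offset : offset + length]


payload = bytes(range(256))
range_cut = "16:8"
# In the real helper the downloaded bytes come from reading the temp file returned by range().
downloaded = payload[16:24]
assert downloaded == expected_slice(payload, range_cut)
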
- - Returns: - list of found ObjectIDs - """ - result = self.cli.object.search( - rpc_endpoint=endpoint, - cid=cid, - bearer=bearer, - oid=oid, - xhdr=xhdr, - filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None, - session=session, - phy=phy, - root=root, - address=address, - generate_key=generate_key, - ttl=ttl, - timeout=timeout, - ) - - found_objects = re.findall(r"(\w{43,44})", result.stdout) - - if expected_objects_list: - if sorted(found_objects) == sorted(expected_objects_list): - logger.info(f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'") - else: - logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'") - - return found_objects - - @wait_for_success() - @reporter.step("Search object nodes") - def nodes( - self, - cluster: Cluster, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> list[ClusterNode]: - endpoint = alive_node.storage_node.get_rpc_endpoint() - - response = self.cli.object.nodes( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - bearer=bearer, - ttl=1 if is_direct else None, - json=True, - xhdr=xhdr, - timeout=timeout, - verify_presence_all=verify_presence_all, - ) - - response_json = json.loads(response.stdout) - # Currently, the command will show expected and confirmed nodes. - # And we (currently) count only nodes which are both expected and confirmed - object_nodes_id = { - required_node - for data_object in response_json["data_objects"] - for required_node in data_object["required_nodes"] - if required_node in data_object["confirmed_nodes"] - } - - netmap_nodes_list = cli_utils.parse_netmap_output( - self.cli.netmap.snapshot( - rpc_endpoint=endpoint, - ).stdout - ) - netmap_nodes = [ - netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id - ] - - object_nodes = [ - cluster_node - for netmap_node in netmap_nodes - for cluster_node in cluster.cluster_nodes - if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT) - ] - - return object_nodes - - @reporter.step("Search parts of object") - def parts( - self, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> list[str]: - endpoint = alive_node.storage_node.get_rpc_endpoint() - response = self.cli.object.nodes( - rpc_endpoint=endpoint, - cid=cid, - oid=oid, - bearer=bearer, - ttl=1 if is_direct else None, - json=True, - xhdr=xhdr, - timeout=timeout, - verify_presence_all=verify_presence_all, - ) - response_json = json.loads(response.stdout) - return [data_object["object_id"] for data_object in response_json["data_objects"]] diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py deleted file mode 100644 index 17b3e9c..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .chunks import ChunksInterface -from .container import ContainerInterface -from .netmap import NetmapInterface -from .object import ObjectInterface diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py 
b/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py deleted file mode 100644 index 986b938..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py +++ /dev/null @@ -1,79 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Optional - -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo - - -class ChunksInterface(ABC): - @abstractmethod - def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: - pass - - @abstractmethod - def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: - pass - - @abstractmethod - def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: - pass - - @abstractmethod - def get_all( - self, - rpc_endpoint: str, - cid: str, - oid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> list[Chunk]: - pass - - @abstractmethod - def get_parity( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> Chunk: - pass - - @abstractmethod - def get_first_data( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> Chunk: - pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py deleted file mode 100644 index d5e3eeb..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py +++ /dev/null @@ -1,125 +0,0 @@ -from abc import ABC, abstractmethod -from typing import List, Optional - -from frostfs_testlib.storage.cluster import Cluster, ClusterNode - - -class ContainerInterface(ABC): - @abstractmethod - def create( - self, - endpoint: str, - nns_zone: Optional[str] = None, - nns_name: Optional[str] = None, - address: Optional[str] = None, - attributes: Optional[dict] = None, - basic_acl: Optional[str] = None, - await_mode: bool = False, - disable_timestamp: bool = False, - force: bool = False, - trace: bool = False, - name: Optional[str] = None, - nonce: Optional[str] = None, - policy: Optional[str] = None, - session: Optional[str] = None, - subnet: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - """ - Create a new container and register it in the FrostFS. - It will be stored in the sidechain when the Inner Ring accepts it. 
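
Aside: the `nodes` implementation above keeps only storage nodes that appear in both `required_nodes` and `confirmed_nodes` of every data object reported by `object nodes --json`. A standalone sketch of that intersection over an invented response:

import json

# Invented sample in the shape the wrapper expects from `object nodes --json`.
response_json = json.loads(
    '{"data_objects": ['
    '{"required_nodes": ["node-a", "node-b"], "confirmed_nodes": ["node-a"]},'
    '{"required_nodes": ["node-c"], "confirmed_nodes": ["node-c", "node-d"]}'
    "]}"
)

# Keep only nodes that are both required and confirmed, as in ObjectOperations.nodes.
object_nodes_id = {
    required_node
    for data_object in response_json["data_objects"]
    for required_node in data_object["required_nodes"]
    if required_node in data_object["confirmed_nodes"]
}

assert object_nodes_id == {"node-a", "node-c"}
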
- """ - raise NotImplementedError("No implemethed method create") - - @abstractmethod - def delete( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - await_mode: bool = False, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - force: bool = False, - trace: bool = False, - ) -> List[str]: - """ - Delete an existing container. - Only the owner of the container has permission to remove the container. - """ - raise NotImplementedError("No implemethed method delete") - - @abstractmethod - def get( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - to: Optional[str] = None, - json_mode: bool = True, - trace: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[str]: - """Get container field info.""" - raise NotImplementedError("No implemethed method get") - - @abstractmethod - def get_eacl( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - json_mode: bool = True, - trace: bool = False, - to: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[str]: - """Get extended ACL table of container.""" - raise NotImplementedError("No implemethed method get-eacl") - - @abstractmethod - def list( - self, - endpoint: str, - name: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - owner: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - **params, - ) -> List[str]: - """List all created containers.""" - raise NotImplementedError("No implemethed method list") - - @abstractmethod - def nodes( - self, - endpoint: str, - cid: str, - cluster: Cluster, - address: Optional[str] = None, - ttl: Optional[int] = None, - from_file: Optional[str] = None, - trace: bool = False, - short: Optional[bool] = True, - xhdr: Optional[dict] = None, - generate_key: Optional[bool] = None, - timeout: Optional[str] = None, - ) -> List[ClusterNode]: - """Show the nodes participating in the container in the current epoch.""" - raise NotImplementedError("No implemethed method nodes") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py deleted file mode 100644 index 3f0a341..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py +++ /dev/null @@ -1,89 +0,0 @@ -from abc import ABC, abstractmethod -from typing import List, Optional - -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo - - -class NetmapInterface(ABC): - @abstractmethod - def epoch( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - trace: Optional[bool] = False, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> int: - """ - Get current epoch number. 
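
Aside: these interfaces are plain ABCs, so a future client (for example the RPC-based wrapper stubbed out earlier in this diff) only has to subclass them and override the abstract methods. A deliberately tiny, hypothetical illustration of that pattern, not tied to any real FrostFS API:

from abc import ABC, abstractmethod


class NetmapLikeInterface(ABC):
    """Toy stand-in for NetmapInterface: a single abstract operation."""

    @abstractmethod
    def epoch(self, rpc_endpoint: str) -> int:
        raise NotImplementedError("No implemented method epoch")


class FakeNetmapOperations(NetmapLikeInterface):
    """Hypothetical implementation backed by a canned value instead of the CLI."""

    def epoch(self, rpc_endpoint: str) -> int:
        return 7


assert FakeNetmapOperations().epoch("grpc://localhost:8080") == 7
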
- """ - raise NotImplementedError("No implemethed method epoch") - - @abstractmethod - def netinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> NodeNetInfo: - """ - Get target node info. - """ - raise NotImplementedError("No implemethed method netinfo") - - @abstractmethod - def nodeinfo( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> NodeNetmapInfo: - """ - Get target node info. - """ - raise NotImplementedError("No implemethed method nodeinfo") - - @abstractmethod - def snapshot( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[NodeNetmapInfo]: - """ - Get target node info. - """ - raise NotImplementedError("No implemethed method snapshot") - - @abstractmethod - def snapshot_one_node( - self, - rpc_endpoint: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - generate_key: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[NodeNetmapInfo]: - """ - Get target one node info. - """ - raise NotImplementedError("No implemethed method snapshot") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py deleted file mode 100644 index 550c461..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py +++ /dev/null @@ -1,223 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any, List, Optional - -from frostfs_testlib.shell.interfaces import CommandResult -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.utils import file_utils - -from .chunks import ChunksInterface - - -class ObjectInterface(ABC): - def __init__(self) -> None: - self.chunks: ChunksInterface - - @abstractmethod - def delete( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def get( - self, - cid: str, - oid: str, - endpoint: str, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> file_utils.TestFile: - pass - - @abstractmethod - def get_from_random_node( - self, - cid: str, - oid: str, - cluster: Cluster, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def hash( - self, - endpoint: str, - cid: str, - oid: str, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - range: Optional[str] = None, - salt: Optional[str] = None, - ttl: Optional[int] = None, - session: Optional[str] = None, - hash_type: Optional[str] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - 
) -> str: - pass - - @abstractmethod - def head( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - json_output: bool = True, - is_raw: bool = False, - is_direct: bool = False, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult | Any: - pass - - @abstractmethod - def lock( - self, - cid: str, - oid: str, - endpoint: str, - lifetime: Optional[int] = None, - expire_at: Optional[int] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def put( - self, - path: str, - cid: str, - endpoint: str, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def patch( - self, - cid: str, - oid: str, - endpoint: str, - ranges: Optional[list[str]] = None, - payloads: Optional[list[str]] = None, - new_attrs: Optional[str] = None, - replace_attrs: bool = False, - bearer: Optional[str] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - trace: bool = False, - ) -> str: - pass - - @abstractmethod - def put_to_random_node( - self, - path: str, - cid: str, - cluster: Cluster, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def range( - self, - cid: str, - oid: str, - range_cut: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> tuple[file_utils.TestFile, bytes]: - pass - - @abstractmethod - def search( - self, - cid: str, - endpoint: str, - bearer: str = "", - oid: Optional[str] = None, - filters: Optional[dict] = None, - expected_objects_list: Optional[list] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - phy: bool = False, - root: bool = False, - timeout: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - ttl: Optional[int] = None, - ) -> List: - pass - - @abstractmethod - def nodes( - self, - cluster: Cluster, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = None, - ) -> List[ClusterNode]: - pass - - @abstractmethod - def parts( - self, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = None, - ) -> List[str]: - pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py b/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py deleted file mode 100644 index 6574012..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py +++ /dev/null @@ -1,10 +0,0 @@ -from abc import ABC - -from . 
import interfaces - - -class GrpcClientWrapper(ABC): - def __init__(self) -> None: - self.object: interfaces.ObjectInterface - self.container: interfaces.ContainerInterface - self.netmap: interfaces.NetmapInterface diff --git a/src/frostfs_testlib/testing/__init__.py b/src/frostfs_testlib/testing/__init__.py deleted file mode 100644 index 3483972..0000000 --- a/src/frostfs_testlib/testing/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from frostfs_testlib.testing.parallel import parallel -from frostfs_testlib.testing.test_control import expect_not_raises, run_optionally, wait_for_success diff --git a/src/frostfs_testlib/testing/cluster_test_base.py b/src/frostfs_testlib/testing/cluster_test_base.py index 50c8eb6..11f67f0 100644 --- a/src/frostfs_testlib/testing/cluster_test_base.py +++ b/src/frostfs_testlib/testing/cluster_test_base.py @@ -1,13 +1,12 @@ -import time from typing import Optional -from frostfs_testlib import reporter -from frostfs_testlib.resources.common import MORPH_BLOCK_TIME +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import Shell from frostfs_testlib.steps import epoch from frostfs_testlib.storage.cluster import Cluster from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode -from frostfs_testlib.utils import datetime_utils + +reporter = get_reporter() # To skip adding every mandatory singleton dependency to EACH test function @@ -15,20 +14,13 @@ class ClusterTestBase: shell: Shell cluster: Cluster - @reporter.step("Tick {epochs_to_tick} epochs, wait {wait_block} block") - def tick_epochs( - self, - epochs_to_tick: int, - alive_node: Optional[StorageNode] = None, - wait_block: int = None, - ): + @reporter.step_deco("Tick {epochs_to_tick} epochs") + def tick_epochs(self, epochs_to_tick: int, alive_node: Optional[StorageNode] = None): for _ in range(epochs_to_tick): - self.tick_epoch(alive_node, wait_block) + self.tick_epoch(alive_node) - def tick_epoch(self, alive_node: Optional[StorageNode] = None, wait_block: int = None, delta: Optional[int] = None): - epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node, delta=delta) - if wait_block: - self.wait_for_blocks(wait_block) + def tick_epoch(self, alive_node: Optional[StorageNode] = None): + epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node) def wait_for_epochs_align(self): epoch.wait_for_epochs_align(self.shell, self.cluster) @@ -38,6 +30,3 @@ class ClusterTestBase: def ensure_fresh_epoch(self): return epoch.ensure_fresh_epoch(self.shell, self.cluster) - - def wait_for_blocks(self, blocks_count: int = 1): - time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * blocks_count) diff --git a/src/frostfs_testlib/testing/parallel.py b/src/frostfs_testlib/testing/parallel.py deleted file mode 100644 index 6c4f6e0..0000000 --- a/src/frostfs_testlib/testing/parallel.py +++ /dev/null @@ -1,148 +0,0 @@ -import itertools -import traceback -from concurrent.futures import Future, ThreadPoolExecutor -from contextlib import contextmanager -from typing import Callable, Collection, Optional, Union - -MAX_WORKERS = 50 - - -@contextmanager -def parallel_workers_limit(workers_count: int): - global MAX_WORKERS - original_value = MAX_WORKERS - MAX_WORKERS = workers_count - try: - yield - finally: - MAX_WORKERS = original_value - - -def parallel( - fn: Union[Callable, list[Callable]], - parallel_items: Optional[Collection] = None, - *args, - **kwargs, -) -> list[Future]: - """Parallel execution of selected function or list of function using ThreadPoolExecutor. 
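
Aside: `parallel`, whose docstring continues below, supports two calling modes: a single callable fanned out over `parallel_items`, or a list of callables each submitted with the shared args/kwargs. A small usage sketch of both modes, assuming a testlib version that still ships `frostfs_testlib.testing.parallel`:

# Assumes a testlib version that still ships frostfs_testlib.testing.parallel.
from frostfs_testlib.testing.parallel import parallel


def square(value: int) -> int:
    return value * value


# Mode 1: one callable, many items (like executor.map).
futures = parallel(square, parallel_items=[1, 2, 3])
assert sorted(future.result() for future in futures) == [1, 4, 9]

# Mode 2: a list of callables, each submitted with the same kwargs (like executor.submit).
futures = parallel([square, square], value=5)
assert [future.result() for future in futures] == [25, 25]
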
- Also checks the exceptions of each thread. - - Args: - fn: function(s) to run. Can work in 2 modes: - 1. If you have dedicated function with some items to process in parallel, - like you do with executor.map(fn, parallel_items), pass this function as fn. - 2. If you need to process each item with it's own method, like you do - with executor.submit(fn, args, kwargs), pass list of methods here. - See examples in runners.py in this repo. - parallel_items: items to iterate on (should be None in case of 2nd mode). - args: any other args required in target function(s). - if any arg is itertool.cycle, it will be iterated before passing to new thread. - kwargs: any other kwargs required in target function(s) - if any kwarg is itertool.cycle, it will be iterated before passing to new thread. - - Returns: - list of futures. - """ - - if callable(fn): - if not parallel_items: - raise RuntimeError("Parallel items should not be none when fn is callable.") - futures = _run_by_items(fn, parallel_items, *args, **kwargs) - elif isinstance(fn, list): - futures = _run_by_fn_list(fn, *args, **kwargs) - else: - raise RuntimeError("Nothing to run. fn should be either callable or list of callables.") - - # Check for exceptions - exceptions = [future.exception() for future in futures if future.exception()] - if exceptions: - # Prettify exception in parallel with all underlying stack traces - # For example, we had 3 RuntimeError exceptions during parallel. This format will give us something like - # - # RuntimeError: The following exceptions occured during parallel run: - # 1) Exception one text - # 2) Exception two text - # 3) Exception three text - # TRACES: - # ==== 1 ==== - # Traceback (most recent call last): - # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run - # result = self.fn(*self.args, **self.kwargs) - # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service - # raise RuntimeError(f"Exception one text") - # RuntimeError: Exception one text - # - # ==== 2 ==== - # Traceback (most recent call last): - # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run - # result = self.fn(*self.args, **self.kwargs) - # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service - # raise RuntimeError(f"Exception two text") - # RuntimeError: Exception two text - # - # ==== 3 ==== - # Traceback (most recent call last): - # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run - # result = self.fn(*self.args, **self.kwargs) - # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service - # raise RuntimeError(f"Exception three text") - # RuntimeError: Exception three text - short_summary = "\n".join([f"{i}) {str(e)}" for i, e in enumerate(exceptions, 1)]) - stack_traces = "\n".join( - [f"==== {i} ====\n{''.join(traceback.TracebackException.from_exception(e).format())}" for i, e in enumerate(exceptions, 1)] - ) - message = f"{short_summary}\nTRACES:\n{stack_traces}" - raise RuntimeError(f"The following exceptions occured during parallel run:\n{message}") - return futures - - -def _run_by_fn_list(fn_list: list[Callable], *args, **kwargs) -> list[Future]: - if not len(fn_list): - return [] - if not all([callable(f) for f in fn_list]): - raise RuntimeError("fn_list should contain only callables") - - futures: list[Future] = [] - - with ThreadPoolExecutor(max_workers=min(len(fn_list), MAX_WORKERS)) as executor: - 
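# Each submission takes a fresh snapshot of args/kwargs via _get_args/_get_kwargs below, so any itertools.cycle value is advanced once per submitted task.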
for fn in fn_list: - task_args = _get_args(*args) - task_kwargs = _get_kwargs(**kwargs) - - futures.append(executor.submit(fn, *task_args, **task_kwargs)) - - return futures - - -def _run_by_items(fn: Callable, parallel_items: Collection, *args, **kwargs) -> list[Future]: - futures: list[Future] = [] - - with ThreadPoolExecutor(max_workers=min(len(parallel_items), MAX_WORKERS)) as executor: - for item in parallel_items: - task_args = _get_args(*args) - task_kwargs = _get_kwargs(**kwargs) - task_args.insert(0, item) - - futures.append(executor.submit(fn, *task_args, **task_kwargs)) - - return futures - - -def _get_kwargs(**kwargs): - actkwargs = {} - for key, arg in kwargs.items(): - if isinstance(arg, itertools.cycle): - actkwargs[key] = next(arg) - else: - actkwargs[key] = arg - return actkwargs - - -def _get_args(*args): - actargs = [] - for arg in args: - if isinstance(arg, itertools.cycle): - actargs.append(next(arg)) - else: - actargs.append(arg) - return actargs diff --git a/src/frostfs_testlib/testing/readable.py b/src/frostfs_testlib/testing/readable.py deleted file mode 100644 index 80f1169..0000000 --- a/src/frostfs_testlib/testing/readable.py +++ /dev/null @@ -1,36 +0,0 @@ -from abc import ABCMeta -from enum import Enum - - -class HumanReadableEnum(Enum): - def __str__(self): - return self._name_ - - def __repr__(self): - return self._name_ - - -class HumanReadableABCMeta(ABCMeta): - def __str__(cls): - if "__repr_name__" in cls.__dict__: - return cls.__dict__["__repr_name__"] - return cls.__name__ - - def __repr__(cls): - if "__repr_name__" in cls.__dict__: - return cls.__dict__["__repr_name__"] - return cls.__name__ - - -class HumanReadableABC(metaclass=HumanReadableABCMeta): - @classmethod - def __str__(cls): - if "__repr_name__" in cls.__dict__: - return cls.__dict__["__repr_name__"] - return type(cls).__name__ - - @classmethod - def __repr__(cls): - if "__repr_name__" in cls.__dict__: - return cls.__dict__["__repr_name__"] - return type(cls).__name__ diff --git a/src/frostfs_testlib/testing/test_control.py b/src/frostfs_testlib/testing/test_control.py index bc38208..ed74f6a 100644 --- a/src/frostfs_testlib/testing/test_control.py +++ b/src/frostfs_testlib/testing/test_control.py @@ -1,18 +1,12 @@ import inspect import logging -import os from functools import wraps from time import sleep, time from typing import Any -import yaml from _pytest.outcomes import Failed from pytest import fail -from frostfs_testlib import reporter -from frostfs_testlib.resources.common import ASSETS_DIR -from frostfs_testlib.utils.func_utils import format_by_args - logger = logging.getLogger("NeoLogger") # TODO: we may consider deprecating some methods here and use tenacity instead @@ -56,7 +50,7 @@ class expect_not_raises: return impl -def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = None, title: str = None): +def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = None): """ Decorator to wait for some conditions/functions to pass successfully. 
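For instance (an illustrative sketch; the wrapper function is hypothetical):

        @retry(max_attempts=10, sleep_interval=2, expected_result="READY")
        def storage_health(node):
            return storage_node_healthcheck(node).health_status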
This is useful if you don't know exact time when something should pass successfully and do not @@ -68,7 +62,8 @@ def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = Non assert max_attempts >= 1, "Cannot apply retry decorator with max_attempts < 1" def wrapper(func): - def call(func, *a, **kw): + @wraps(func) + def impl(*a, **kw): last_exception = None for _ in range(max_attempts): try: @@ -89,14 +84,6 @@ def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = Non if last_exception is not None: raise last_exception - @wraps(func) - def impl(*a, **kw): - if title is not None: - with reporter.step(format_by_args(func, title, *a, **kw)): - return call(func, *a, **kw) - - return call(func, *a, **kw) - return impl return wrapper @@ -131,49 +118,12 @@ def run_optionally(enabled: bool, mock_value: Any = True): return deco -def cached_fixture(enabled: bool): - """ - Decorator to cache fixtures. - MUST be placed after @pytest.fixture and before @allure decorators. - - Args: - enabled: if true, decorated func will be cached. - """ - - def deco(func): - @wraps(func) - def func_impl(*a, **kw): - # TODO: *a and *kw should be parsed to some kind of hashsum and used in filename to prevent cache load from different parameters - cache_file = os.path.join(ASSETS_DIR, f"fixture_cache_{func.__name__}.yml") - - if enabled and os.path.exists(cache_file): - with open(cache_file, "r") as cache_input: - return yaml.load(cache_input, Loader=yaml.Loader) - - result = func(*a, **kw) - - if enabled: - with open(cache_file, "w") as cache_output: - yaml.dump(result, cache_output) - return result - - # TODO: cache yielding fixtures - @wraps(func) - def gen_impl(*a, **kw): - raise NotImplementedError("Not implemented for yielding fixtures") - - return gen_impl if inspect.isgeneratorfunction(func) else func_impl - - return deco - - def wait_for_success( max_wait_time: int = 60, interval: int = 1, expected_result: Any = None, fail_testcase: bool = False, fail_message: str = "", - title: str = None, ): """ Decorator to wait for some conditions/functions to pass successfully. @@ -184,7 +134,8 @@ def wait_for_success( """ def wrapper(func): - def call(func, *a, **kw): + @wraps(func) + def impl(*a, **kw): start = int(round(time())) last_exception = None while start + max_wait_time >= int(round(time())): @@ -209,14 +160,6 @@ def wait_for_success( if last_exception is not None: raise last_exception - @wraps(func) - def impl(*a, **kw): - if title is not None: - with reporter.step(format_by_args(func, title, *a, **kw)): - return call(func, *a, **kw) - - return call(func, *a, **kw) - return impl return wrapper diff --git a/src/frostfs_testlib/utils/__init__.py b/src/frostfs_testlib/utils/__init__.py index 4acc5b1..fbc4a8f 100644 --- a/src/frostfs_testlib/utils/__init__.py +++ b/src/frostfs_testlib/utils/__init__.py @@ -1,7 +1,3 @@ -""" -Idea of utils is to have small utilitary functions which are not dependent of anything. -""" - import frostfs_testlib.utils.converting_utils import frostfs_testlib.utils.datetime_utils import frostfs_testlib.utils.json_utils diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 0f9fef2..7ed1a27 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -5,27 +5,76 @@ """ Helper functions to use with `frostfs-cli`, `neo-go` and other CLIs. 
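For example, an AWS CLI profile for the S3 gate tests can be prepared with the helper below (an illustrative sketch; using `aws configure` as the driven command and the key values are assumptions):

    _configure_aws_cli("aws configure", "<access-key-id>", "<secret-access-key>")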
""" -import csv import json import logging -import re +import subprocess import sys from contextlib import suppress from datetime import datetime -from io import StringIO from textwrap import shorten -from typing import Any, Optional, Union +from typing import TypedDict, Union import pexpect -from frostfs_testlib import reporter -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetmapInfo +from frostfs_testlib.reporter import get_reporter +reporter = get_reporter() logger = logging.getLogger("NeoLogger") COLOR_GREEN = "\033[92m" COLOR_OFF = "\033[0m" +def _cmd_run(cmd: str, timeout: int = 90) -> str: + """ + Runs given shell command , in case of success returns its stdout, + in case of failure returns error message. + """ + compl_proc = None + start_time = datetime.now() + try: + logger.info(f"{COLOR_GREEN}Executing command: {cmd}{COLOR_OFF}") + start_time = datetime.utcnow() + compl_proc = subprocess.run( + cmd, + check=True, + universal_newlines=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + timeout=timeout, + shell=True, + ) + output = compl_proc.stdout + return_code = compl_proc.returncode + end_time = datetime.utcnow() + logger.info(f"{COLOR_GREEN}Output: {output}{COLOR_OFF}") + _attach_allure_log(cmd, output, return_code, start_time, end_time) + + return output + except subprocess.CalledProcessError as exc: + logger.info( + f"Command: {cmd}\n" f"Error:\nreturn code: {exc.returncode} " f"\nOutput: {exc.output}" + ) + end_time = datetime.now() + return_code, cmd_output = subprocess.getstatusoutput(cmd) + _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time) + + raise RuntimeError( + f"Command: {cmd}\n" f"Error:\nreturn code: {exc.returncode}\n" f"Output: {exc.output}" + ) from exc + except OSError as exc: + raise RuntimeError(f"Command: {cmd}\n" f"Output: {exc.strerror}") from exc + except Exception as exc: + return_code, cmd_output = subprocess.getstatusoutput(cmd) + end_time = datetime.now() + _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time) + logger.info( + f"Command: {cmd}\n" + f"Error:\nreturn code: {return_code}\n" + f"Output: {exc.output.decode('utf-8') if type(exc.output) is bytes else exc.output}" + ) + raise + + def _run_with_passwd(cmd: str) -> str: child = pexpect.spawn(cmd) child.delaybeforesend = 1 @@ -40,7 +89,7 @@ def _run_with_passwd(cmd: str) -> str: return cmd.decode() -def _configure_aws_cli(cmd: str, key_id: str, access_key: str, region: str, out_format: str = "json") -> str: +def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str = "json") -> str: child = pexpect.spawn(cmd) child.delaybeforesend = 1 @@ -51,7 +100,7 @@ def _configure_aws_cli(cmd: str, key_id: str, access_key: str, region: str, out_ child.sendline(access_key) child.expect("Default region name.*") - child.sendline("region") + child.sendline("") child.expect("Default output format.*") child.sendline(out_format) @@ -63,7 +112,9 @@ def _configure_aws_cli(cmd: str, key_id: str, access_key: str, region: str, out_ return cmd.decode() -def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime) -> None: +def _attach_allure_log( + cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime +) -> None: command_attachment = ( f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n" @@ -74,136 +125,11 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date reporter.attach(command_attachment, "Command execution") -def 
log_command_execution(cmd: str, output: Union[str, dict], params: Optional[dict] = None, **kwargs) -> None: +def log_command_execution(cmd: str, output: Union[str, TypedDict]) -> None: logger.info(f"{cmd}: {output}") - - if not params: - params = {} - - if params.get("Body") and len(params.get("Body")) > 1000: - params["Body"] = "" - - output_params = params - - try: - json_params = json.dumps(params, indent=4, sort_keys=True, default=str) - except TypeError as err: - logger.warning(f"Failed to serialize '{cmd}' request parameters:\n{params}\nException: {err}") - else: - output_params = json_params - - output = json.dumps(output, indent=4, sort_keys=True, default=str) - - command_execution = f"COMMAND: '{cmd}'\n" f"URL: {kwargs['endpoint']}\n" f"PARAMS:\n{output_params}\n" f"OUTPUT:\n{output}\n" - aws_command = _convert_request_to_aws_cli_command(cmd, params, **kwargs) - - reporter.attach(command_execution, "Command execution") - reporter.attach(aws_command, "AWS CLI Command") - - -def _convert_request_to_aws_cli_command(command: str, params: dict, **kwargs) -> str: - overriden_names = [_convert_json_name_to_aws_cli(name) for name in kwargs.keys()] - command = command.replace("_", "-") - options = [] - - for name, value in params.items(): - name = _convert_json_name_to_aws_cli(name) - - # To override parameters for AWS CLI - if name in overriden_names: - continue - - if option := _create_option(name, value): - options.append(option) - - for name, value in kwargs.items(): - name = _convert_json_name_to_aws_cli(name) - if option := _create_option(name, value): - options.append(option) - - options = " ".join(options) - api = "s3api" if "s3" in kwargs["endpoint"] else "iam" - return f"aws --no-verify-ssl --no-paginate {api} {command} {options}" - - -def _convert_json_name_to_aws_cli(name: str) -> str: - specific_names = {"CORSConfiguration": "cors-configuration"} - - if aws_cli_name := specific_names.get(name): - return aws_cli_name - return re.sub(r"([a-z])([A-Z])", r"\1 \2", name).lower().replace(" ", "-").replace("_", "-") - - -def _create_option(name: str, value: Any) -> str | None: - if isinstance(value, bool) and value: - return f"--{name}" - - if isinstance(value, dict): - value = json.dumps(value, indent=4, sort_keys=True, default=str) - return f"--{name} '{value}'" - - if value: - return f"--{name} {value}" - - return None - - -def parse_netmap_output(output: str) -> list[NodeNetmapInfo]: - """ - The code will parse each line and return each node as dataclass. 
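A usage sketch (illustrative; obtaining the snapshot text through FrostfsCli is an assumption):

        output = cli.netmap.snapshot(rpc_endpoint=endpoint, wallet=wallet_path).stdout
        nodes = parse_netmap_output(output)
        online_nodes = [node for node in nodes if node.node_status == "ONLINE"]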
- """ - netmap_nodes = output.split("Node ")[1:] - dataclasses_netmap = [] - result_netmap = {} - - regexes = { - "node_id": r"\d+: (?P\w+)", - "node_data_ips": r"(?P/ip4/.+?)$", - "node_status": r"(?PONLINE|OFFLINE)", - "cluster_name": r"ClusterName: (?P\w+)", - "continent": r"Continent: (?P\w+)", - "country": r"Country: (?P\w+)", - "country_code": r"CountryCode: (?P\w+)", - "external_address": r"ExternalAddr: (?P/ip[4].+?)$", - "location": r"Location: (?P\w+.*)", - "node": r"Node: (?P\d+\.\d+\.\d+\.\d+)", - "price": r"Price: (?P\d+)", - "sub_div": r"SubDiv: (?P.*)", - "sub_div_code": r"SubDivCode: (?P\w+)", - "un_locode": r"UN-LOCODE: (?P\w+.*)", - "role": r"role: (?P\w+)", - } - - for node in netmap_nodes: - for key, regex in regexes.items(): - search_result = re.search(regex, node, flags=re.MULTILINE) - if key == "node_data_ips": - result_netmap[key] = search_result[key].strip().split(" ") - continue - if key == "external_address": - result_netmap[key] = search_result[key].strip().split(",") - continue - if search_result == None: - result_netmap[key] = None - continue - result_netmap[key] = search_result[key].strip() - - dataclasses_netmap.append(NodeNetmapInfo(**result_netmap)) - - return dataclasses_netmap - - -def parse_cmd_table(output: str, delimiter="|") -> list[dict[str, str]]: - parsing_output = [] - reader = csv.reader(StringIO(output.strip()), delimiter=delimiter) - iter_reader = iter(reader) - header_row = next(iter_reader) - for row in iter_reader: - table = {} - for i in range(len(row)): - header = header_row[i].strip().lower().replace(" ", "_") - value = row[i].strip().lower() - if header: - table[header] = value - parsing_output.append(table) - return parsing_output + with suppress(Exception): + json_output = json.dumps(output, indent=4, sort_keys=True) + output = json_output + command_attachment = f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n" + with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): + reporter.attach(command_attachment, "Command execution") diff --git a/src/frostfs_testlib/utils/converting_utils.py b/src/frostfs_testlib/utils/converting_utils.py index 273d9b4..24b77ae 100644 --- a/src/frostfs_testlib/utils/converting_utils.py +++ b/src/frostfs_testlib/utils/converting_utils.py @@ -1,23 +1,10 @@ import base64 import binascii import json -from typing import Tuple import base58 -def calc_unit(value: float, skip_units: int = 0) -> Tuple[float, str]: - units = ["B", "KiB", "MiB", "GiB", "TiB"] - - for unit in units[skip_units:]: - if value < 1024: - return value, unit - - value = value / 1024.0 - - return value, unit - - def str_to_ascii_hex(input: str) -> str: b = binascii.hexlify(input.encode()) return str(b)[2:-1] diff --git a/src/frostfs_testlib/utils/env_utils.py b/src/frostfs_testlib/utils/env_utils.py index 3fdebe1..6b4fb40 100644 --- a/src/frostfs_testlib/utils/env_utils.py +++ b/src/frostfs_testlib/utils/env_utils.py @@ -1,12 +1,13 @@ import logging import re -from frostfs_testlib import reporter +from frostfs_testlib.reporter import get_reporter +reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step("Read environment.properties") +@reporter.step_deco("Read environment.properties") def read_env_properties(file_path: str) -> dict: with open(file_path, "r") as file: raw_content = file.read() @@ -22,7 +23,7 @@ def read_env_properties(file_path: str) -> dict: return env_properties -@reporter.step("Update data in environment.properties") +@reporter.step_deco("Update data in environment.properties") def 
save_env_properties(file_path: str, env_data: dict) -> None: with open(file_path, "a+") as env_file: for env, env_value in env_data.items(): diff --git a/src/frostfs_testlib/utils/failover_utils.py b/src/frostfs_testlib/utils/failover_utils.py index 5c4d52f..8c6062f 100644 --- a/src/frostfs_testlib/utils/failover_utils.py +++ b/src/frostfs_testlib/utils/failover_utils.py @@ -3,22 +3,72 @@ from dataclasses import dataclass from time import sleep from typing import Optional -from frostfs_testlib import reporter +from frostfs_testlib.hosting import Host +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.common import SERVICE_MAX_STARTUP_TIME -from frostfs_testlib.shell import Shell +from frostfs_testlib.shell import CommandOptions, Shell from frostfs_testlib.steps.cli.object import neo_go_dump_keys from frostfs_testlib.steps.node_management import storage_node_healthcheck from frostfs_testlib.steps.storage_policy import get_nodes_with_object from frostfs_testlib.storage.cluster import Cluster, ClusterNode, NodeBase, StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain -from frostfs_testlib.storage.dataclasses.node_base import ServiceClass -from frostfs_testlib.testing.test_control import wait_for_success +from frostfs_testlib.testing.test_control import retry, wait_for_success from frostfs_testlib.utils.datetime_utils import parse_time +reporter = get_reporter() + logger = logging.getLogger("NeoLogger") -@reporter.step("Check and return status of given service") +@reporter.step_deco("Ping node") +def ping_host(shell: Shell, host: Host): + options = CommandOptions(check=False) + return shell.exec(f"ping {host.config.address} -c 1", options).return_code + + +@reporter.step_deco("Wait for storage nodes returned to cluster") +def wait_all_storage_nodes_returned(shell: Shell, cluster: Cluster) -> None: + for node in cluster.services(StorageNode): + with reporter.step(f"Run health check for storage at '{node}'"): + wait_for_host_online(shell, node) + wait_for_node_online(node) + + +@retry(max_attempts=60, sleep_interval=5, expected_result=0) +@reporter.step_deco("Waiting for host of {node} to go online") +def wait_for_host_online(shell: Shell, node: StorageNode): + try: + # TODO: Quick solution for now, should be replaced by lib interactions + return ping_host(shell, node.host) + except Exception as err: + logger.warning(f"Host ping fails with error {err}") + return 1 + + +@retry(max_attempts=60, sleep_interval=5, expected_result=1) +@reporter.step_deco("Waiting for host of {node} to go offline") +def wait_for_host_offline(shell: Shell, node: StorageNode): + try: + # TODO: Quick solution for now, should be replaced by lib interactions + return ping_host(shell, node.host) + except Exception as err: + logger.warning(f"Host ping fails with error {err}") + return 0 + + +@retry(max_attempts=20, sleep_interval=30, expected_result=True) +@reporter.step_deco("Waiting for node {node} to go online") +def wait_for_node_online(node: StorageNode): + try: + health_check = storage_node_healthcheck(node) + except Exception as err: + logger.warning(f"Node healthcheck fails with error {err}") + return False + + return health_check.health_status == "READY" and health_check.network_status == "ONLINE" + + +@reporter.step_deco("Check and return status of given service") def service_status(service: str, shell: Shell) -> str: return shell.exec(f"sudo systemctl is-active {service}").stdout.rstrip() @@ -71,14 +121,14 @@ class TopCommand: ) 
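A minimal usage sketch of the wait helpers above (illustrative; `shell` and `cluster` come from test fixtures, and the systemd unit name is an assumption):

    # After a failover scenario, block until every storage node passes its healthcheck
    wait_all_storage_nodes_returned(shell, cluster)
    assert service_status("frostfs-storage.service", shell) == "active"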
-@reporter.step("Run `top` command with specified PID") +@reporter.step_deco("Run `top` command with specified PID") def service_status_top(service: str, shell: Shell) -> TopCommand: pid = service_pid(service, shell) output = shell.exec(f"sudo top -b -n 1 -p {pid}").stdout return TopCommand.from_stdout(output, pid) -@reporter.step("Restart service n times with sleep") +@reporter.step_deco("Restart service n times with sleep") def multiple_restart( service_type: type[NodeBase], node: ClusterNode, @@ -89,16 +139,19 @@ def multiple_restart( service_name = node.service(service_type).name for _ in range(count): node.host.restart_service(service_name) - logger.info(f"Restart {service_systemctl_name}; sleep {sleep_interval} seconds and continue") + logger.info( + f"Restart {service_systemctl_name}; sleep {sleep_interval} seconds and continue" + ) sleep(sleep_interval) -@wait_for_success(60, 5, title="Wait for services become {expected_status} on node {cluster_node}") -def check_services_status(cluster_node: ClusterNode, service_list: list[ServiceClass], expected_status: str): +@reporter.step_deco("Get status of list of services and check expected status") +@wait_for_success(60, 5) +def check_services_status(service_list: list[str], expected_status: str, shell: Shell): cmd = "" for service in service_list: - cmd += f' sudo systemctl status {service.get_service_systemctl_name()} --lines=0 | grep "Active:";' - result = cluster_node.host.get_shell().exec(cmd).stdout.rstrip() + cmd += f' sudo systemctl status {service} --lines=0 | grep "Active:";' + result = shell.exec(cmd).stdout.rstrip() statuses = list() for line in result.split("\n"): status_substring = line.split() @@ -109,15 +162,19 @@ def check_services_status(cluster_node: ClusterNode, service_list: list[ServiceC ), f"Requested status={expected_status} not found in requested services={service_list}, list of statuses={result}" -@wait_for_success(60, 5, title="Wait for {service} become active") -def wait_service_in_desired_state(service: str, shell: Shell, expected_status: Optional[str] = "active"): +@reporter.step_deco("Wait for active status of passed service") +@wait_for_success(60, 5) +def wait_service_in_desired_state( + service: str, shell: Shell, expected_status: Optional[str] = "active" +): real_status = service_status(service=service, shell=shell) assert ( expected_status == real_status ), f"Service {service}: expected status= {expected_status}, real status {real_status}" -@wait_for_success(parse_time(SERVICE_MAX_STARTUP_TIME), 1, title="Wait for {service_type} passes healtcheck on {node}") +@reporter.step_deco("Run healthcheck against passed service") +@wait_for_success(parse_time(SERVICE_MAX_STARTUP_TIME), 1) def service_type_healthcheck( service_type: type[NodeBase], node: ClusterNode, @@ -128,25 +185,26 @@ def service_type_healthcheck( ), f"Healthcheck failed for {service.get_service_systemctl_name()}, IP={node.host_ip}" -@reporter.step("Kill by process name") +@reporter.step_deco("Kill by process name") def kill_by_service_name(service_type: type[NodeBase], node: ClusterNode): service_systemctl_name = node.service(service_type).get_service_systemctl_name() pid = service_pid(service_systemctl_name, node.host.get_shell()) node.host.get_shell().exec(f"sudo kill -9 {pid}") -@reporter.step("Suspend {service}") +@reporter.step_deco("Service {service} suspend") def suspend_service(shell: Shell, service: str): shell.exec(f"sudo kill -STOP {service_pid(service, shell)}") -@reporter.step("Resume {service}") +@reporter.step_deco("Service 
{service} resume") def resume_service(shell: Shell, service: str): shell.exec(f"sudo kill -CONT {service_pid(service, shell)}") +@reporter.step_deco("Retrieve service's pid") # retry mechanism cause when the task has been started recently '0' PID could be returned -@wait_for_success(10, 1, title="Get {service} pid") +@wait_for_success(10, 1) def service_pid(service: str, shell: Shell) -> int: output = shell.exec(f"systemctl show --property MainPID {service}").stdout.rstrip() splitted = output.split("=") @@ -155,7 +213,7 @@ def service_pid(service: str, shell: Shell) -> int: return PID -@reporter.step("Wrapper for neo-go dump keys command") +@reporter.step_deco("Wrapper for neo-go dump keys command") def dump_keys(shell: Shell, node: ClusterNode) -> dict: host = node.host service_config = host.get_service_config(node.service(MorphChain).name) @@ -163,7 +221,7 @@ def dump_keys(shell: Shell, node: ClusterNode) -> dict: return neo_go_dump_keys(shell=shell, wallet=wallet) -@reporter.step("Wait for object replication") +@reporter.step_deco("Wait for object replication") def wait_object_replication( cid: str, oid: str, diff --git a/src/frostfs_testlib/utils/file_keeper.py b/src/frostfs_testlib/utils/file_keeper.py index a5670cc..ad6836b 100644 --- a/src/frostfs_testlib/utils/file_keeper.py +++ b/src/frostfs_testlib/utils/file_keeper.py @@ -1,15 +1,17 @@ from concurrent.futures import ThreadPoolExecutor -from frostfs_testlib import reporter +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.storage.dataclasses.node_base import NodeBase +reporter = get_reporter() + class FileKeeper: """This class is responsible to make backup copy of modified file and restore when required (mostly after the test)""" files_to_restore: dict[NodeBase, list[str]] = {} - @reporter.step("Adding {file_to_restore} from node {node} to restore list") + @reporter.step_deco("Adding {file_to_restore} from node {node} to restore list") def add(self, node: NodeBase, file_to_restore: str): if node in self.files_to_restore and file_to_restore in self.files_to_restore[node]: # Already added @@ -24,7 +26,7 @@ class FileKeeper: shell = node.host.get_shell() shell.exec(f"cp {file_to_restore} {file_to_restore}.bak") - @reporter.step("Restore files") + @reporter.step_deco("Restore files") def restore_files(self): nodes = self.files_to_restore.keys() if not nodes: @@ -39,7 +41,7 @@ class FileKeeper: # Iterate through results for exception check if any pass - @reporter.step("Restore files on node {node}") + @reporter.step_deco("Restore files on node {node}") def _restore_files_on_node(self, node: NodeBase): shell = node.host.get_shell() for file_to_restore in self.files_to_restore[node]: diff --git a/src/frostfs_testlib/utils/file_utils.py b/src/frostfs_testlib/utils/file_utils.py index 8839d7f..a41665e 100644 --- a/src/frostfs_testlib/utils/file_utils.py +++ b/src/frostfs_testlib/utils/file_utils.py @@ -4,48 +4,14 @@ import os import uuid from typing import Any, Optional -from frostfs_testlib import reporter +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.common import ASSETS_DIR -from frostfs_testlib.utils import string_utils +reporter = get_reporter() logger = logging.getLogger("NeoLogger") -class TestFile(os.PathLike): - def __init__(self, path: str): - self.path = path - - def __del__(self): - logger.debug(f"Removing file {self.path}") - if os.path.exists(self.path): - os.remove(self.path) - - def __str__(self): - return self.path - - def __repr__(self): - return self.path - - def 
__fspath__(self): - return self.path - - -def ensure_directory(path): - directory = os.path.dirname(path) - - if not os.path.exists(directory): - os.makedirs(directory) - - -def ensure_directory_opener(path, flags): - ensure_directory(path) - return os.open(path, flags) - - -# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps -# Use object_size dt in future as argument -@reporter.step("Generate file") -def generate_file(size: int, file_name: Optional[str] = None) -> TestFile: +def generate_file(size: int) -> str: """Generates a binary file with the specified size in bytes. Args: @@ -54,26 +20,19 @@ def generate_file(size: int, file_name: Optional[str] = None) -> TestFile: Returns: The path to the generated file. """ - - if file_name is None: - file_name = string_utils.unique_name("object-") - - test_file = TestFile(os.path.join(ASSETS_DIR, file_name)) - with open(test_file, "wb", opener=ensure_directory_opener) as file: + file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4())) + with open(file_path, "wb") as file: file.write(os.urandom(size)) - logger.info(f"File with size {size} bytes has been generated: {test_file}") + logger.info(f"File with size {size} bytes has been generated: {file_path}") - return test_file + return file_path -# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps -# Use object_size dt in future as argument -@reporter.step("Generate file with content") def generate_file_with_content( size: int, - file_path: Optional[str | TestFile] = None, + file_path: Optional[str] = None, content: Optional[str] = None, -) -> TestFile: +) -> str: """Creates a new file with specified content. Args: @@ -90,22 +49,20 @@ def generate_file_with_content( content = os.urandom(size) mode = "wb" - test_file = None if not file_path: - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) - elif isinstance(file_path, TestFile): - test_file = file_path + file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) else: - test_file = TestFile(file_path) + if not os.path.exists(os.path.dirname(file_path)): + os.makedirs(os.path.dirname(file_path)) - with open(test_file, mode, opener=ensure_directory_opener) as file: + with open(file_path, mode) as file: file.write(content) - return test_file + return file_path -@reporter.step("Get File Hash") -def get_file_hash(file_path: str | TestFile, len: Optional[int] = None, offset: Optional[int] = None) -> str: +@reporter.step_deco("Get File Hash") +def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str: """Generates hash for the specified file. Args: @@ -131,8 +88,8 @@ def get_file_hash(file_path: str | TestFile, len: Optional[int] = None, offset: return file_hash.hexdigest() -@reporter.step("Concatenation set of files to one file") -def concat_files(file_paths: list[str | TestFile], resulting_file_path: Optional[str | TestFile] = None) -> TestFile: +@reporter.step_deco("Concatenation set of files to one file") +def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str: """Concatenates several files into a single file. Args: @@ -142,24 +99,16 @@ def concat_files(file_paths: list[str | TestFile], resulting_file_path: Optional Returns: Path to the resulting file. 
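Example (illustrative sketch):

        parts = [generate_file(1024) for _ in range(3)]
        merged_file = concat_files(parts)
        merged_hash = get_file_hash(merged_file)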
""" - - test_file = None if not resulting_file_path: - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) - elif isinstance(resulting_file_path, TestFile): - test_file = resulting_file_path - else: - test_file = TestFile(resulting_file_path) - - with open(test_file, "wb", opener=ensure_directory_opener) as f: + resulting_file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) + with open(resulting_file_path, "wb") as f: for file in file_paths: with open(file, "rb") as part_file: f.write(part_file.read()) - return test_file + return resulting_file_path -@reporter.step("Split file to {parts} parts") -def split_file(file_path: str | TestFile, parts: int) -> list[TestFile]: +def split_file(file_path: str, parts: int) -> list[str]: """Splits specified file into several specified number of parts. Each part is saved under name `{original_file}_part_{i}`. @@ -181,7 +130,7 @@ def split_file(file_path: str | TestFile, parts: int) -> list[TestFile]: part_file_paths = [] for content_offset in range(0, content_size + 1, chunk_size): part_file_name = f"{file_path}_part_{part_id}" - part_file_paths.append(TestFile(part_file_name)) + part_file_paths.append(part_file_name) with open(part_file_name, "wb") as out_file: out_file.write(content[content_offset : content_offset + chunk_size]) part_id += 1 @@ -189,8 +138,9 @@ def split_file(file_path: str | TestFile, parts: int) -> list[TestFile]: return part_file_paths -@reporter.step("Get file content") -def get_file_content(file_path: str | TestFile, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None) -> Any: +def get_file_content( + file_path: str, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None +) -> Any: """Returns content of specified file. Args: diff --git a/src/frostfs_testlib/utils/func_utils.py b/src/frostfs_testlib/utils/func_utils.py deleted file mode 100644 index 0e22d4a..0000000 --- a/src/frostfs_testlib/utils/func_utils.py +++ /dev/null @@ -1,58 +0,0 @@ -import collections -import inspect -import sys -from typing import Callable - - -def format_by_args(__func: Callable, __title: str, *a, **kw) -> str: - params = _func_parameters(__func, *a, **kw) - args = list(map(lambda x: _represent(x), a)) - - return __title.format(*args, **params) - - -# These 2 functions are copied from allure_commons._allure -# Duplicate it here in order to be independent of allure and make some adjustments. -def _represent(item): - if isinstance(item, str): - return item - elif isinstance(item, (bytes, bytearray)): - return repr(type(item)) - else: - return repr(item) - - -def _func_parameters(func, *args, **kwargs): - parameters = {} - arg_spec = inspect.getfullargspec(func) - arg_order = list(arg_spec.args) - args_dict = dict(zip(arg_spec.args, args)) - - if arg_spec.defaults: - kwargs_defaults_dict = dict(zip(arg_spec.args[-len(arg_spec.defaults) :], arg_spec.defaults)) - parameters.update(kwargs_defaults_dict) - - if arg_spec.varargs: - arg_order.append(arg_spec.varargs) - varargs = args[len(arg_spec.args) :] - parameters.update({arg_spec.varargs: varargs} if varargs else {}) - - if arg_spec.args and arg_spec.args[0] in ["cls", "self"]: - args_dict.pop(arg_spec.args[0], None) - - if kwargs: - if sys.version_info < (3, 7): - # Sort alphabetically as old python versions does - # not preserve call order for kwargs. 
- arg_order.extend(sorted(list(kwargs.keys()))) - else: - # Keep py3.7 behaviour to preserve kwargs order - arg_order.extend(list(kwargs.keys())) - parameters.update(kwargs) - - parameters.update(args_dict) - - items = parameters.items() - sorted_items = sorted(map(lambda kv: (kv[0], _represent(kv[1])), items), key=lambda x: arg_order.index(x[0])) - - return collections.OrderedDict(sorted_items) diff --git a/src/frostfs_testlib/utils/string_utils.py b/src/frostfs_testlib/utils/string_utils.py index acbca92..a80192c 100644 --- a/src/frostfs_testlib/utils/string_utils.py +++ b/src/frostfs_testlib/utils/string_utils.py @@ -1,29 +1,11 @@ -import itertools import random import re import string -from datetime import datetime ONLY_ASCII_LETTERS = string.ascii_letters DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits NON_DIGITS_AND_LETTERS = string.punctuation -# if unique_name is called multiple times within the same microsecond, append 0-4 to the name so it surely unique -FUSE = itertools.cycle(range(5)) - - -def unique_name(prefix: str = "", postfix: str = ""): - """ - Generate unique short name of anything with prefix. - This should be unique in scope of multiple runs - - Args: - prefix: prefix for unique name generation - Returns: - unique name string - """ - return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}{next(FUSE)}{postfix}" - def random_string(length: int = 5, source: str = ONLY_ASCII_LETTERS): """ diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index 0676085..26fedf5 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -1,90 +1,83 @@ import logging import re -from functools import lru_cache +import os -from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli -from frostfs_testlib.hosting import Host, Hosting -from frostfs_testlib.resources.cli import FROSTFS_ADM_EXEC, FROSTFS_AUTHMATE_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE +from frostfs_testlib.hosting import Hosting +from frostfs_testlib.resources.cli import ( + FROSTFS_ADM_EXEC, + FROSTFS_AUTHMATE_EXEC, + FROSTFS_CLI_EXEC, + NEOGO_EXECUTABLE, +) +from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell -from frostfs_testlib.testing.parallel import parallel logger = logging.getLogger("NeoLogger") -@reporter.step("Get local binaries versions") def get_local_binaries_versions(shell: Shell) -> dict[str, str]: versions = {} - for binary in [NEOGO_EXECUTABLE, FROSTFS_AUTHMATE_EXEC]: - out = shell.exec(f"{binary} --version").stdout - versions[binary] = parse_version(out) + # Extra binaries to get version from + extra_binaries = os.getenv("EXTRA_BINARIES", "").split(',') - frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC) - versions[FROSTFS_CLI_EXEC] = parse_version(frostfs_cli.version.get().stdout) + for binary in [NEOGO_EXECUTABLE, FROSTFS_AUTHMATE_EXEC, *extra_binaries]: + out = shell.exec(f"{binary} --version").stdout + versions[binary] = _parse_version(out) + + frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + versions[FROSTFS_CLI_EXEC] = _parse_version(frostfs_cli.version.get().stdout) try: frostfs_adm = FrostfsAdm(shell, FROSTFS_ADM_EXEC) - versions[FROSTFS_ADM_EXEC] = parse_version(frostfs_adm.version.get().stdout) + versions[FROSTFS_ADM_EXEC] = _parse_version(frostfs_adm.version.get().stdout) except RuntimeError: logger.info(f"{FROSTFS_ADM_EXEC} not installed") out = shell.exec("aws 
--version").stdout out_lines = out.split("\n") versions["AWS"] = out_lines[0] if out_lines else "Unknown" - logger.info(f"Local binaries version: {out_lines[0]}") return versions -@reporter.step("Collect binaries versions from host") -def parallel_binary_verions(host: Host) -> dict[str, str]: +def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: versions_by_host = {} + for host in hosting.hosts: + binary_path_by_name = {} # Maps binary name to executable path + for service_config in host.config.services: + exec_path = service_config.attributes.get("exec_path") + if exec_path: + binary_path_by_name[service_config.name] = exec_path + for cli_config in host.config.clis: + binary_path_by_name[cli_config.name] = cli_config.exec_path - binary_path_by_name = { - **{ - svc.name[:-3]: { - "exec_path": svc.attributes.get("exec_path"), - "param": svc.attributes.get("custom_version_parameter", "--version"), - } - for svc in host.config.services - if svc.attributes.get("exec_path") and svc.attributes.get("requires_version_check", "true") == "true" - }, - **{ - cli.name: {"exec_path": cli.exec_path, "param": cli.attributes.get("custom_version_parameter", "--version")} - for cli in host.config.clis - if cli.attributes.get("requires_version_check", "true") == "true" - }, - } + shell = host.get_shell() + versions_at_host = {} + for binary_name, binary_path in binary_path_by_name.items(): + try: + result = shell.exec(f"{binary_path} --version") + versions_at_host[binary_name] = _parse_version(result.stdout) + except Exception as exc: + logger.error(f"Cannot get version for {binary_path} because of\n{exc}") + versions_at_host[binary_name] = "Unknown" + versions_by_host[host.config.address] = versions_at_host - shell = host.get_shell() - versions_at_host = {} - for binary_name, binary in binary_path_by_name.items(): - binary_path = binary["exec_path"] - try: - result = shell.exec(f"{binary_path} {binary['param']}") - version = parse_version(result.stdout) or parse_version(result.stderr) or "Unknown" - versions_at_host[binary_name] = version.strip() - except Exception as exc: - logger.error(f"Cannot get version for {binary_path} because of\n{exc}") - versions_at_host[binary_name] = "Unknown" - versions_by_host[host.config.address] = versions_at_host - return versions_by_host + # Consolidate versions across all hosts + versions = {} + for host, binary_versions in versions_by_host.items(): + for name, version in binary_versions.items(): + captured_version = versions.get(name) + if captured_version: + assert ( + captured_version == version + ), f"Binary {name} has inconsistent version on host {host}" + else: + versions[name] = version + return versions -@lru_cache -def get_remote_binaries_versions(hosting: Hosting) -> dict[str, dict[str, str]]: - versions_by_host: dict[str, dict[str, str]] = {} - - with reporter.step("Get remote binaries versions"): - future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts) - - for future in future_binary_verions: - versions_by_host.update(future.result()) - - return versions_by_host - - -def parse_version(version_output: str) -> str: - version = re.search(r"(?<=version[:=])\s?[\"\']?v?(.+)", version_output, re.IGNORECASE) - return version.group(1).strip("\"'\n\t ") if version else version_output +def _parse_version(version_output: str) -> str: + version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE) + return version.group(1).strip() if version else "Unknown" diff --git a/src/frostfs_testlib/utils/wallet_utils.py 
b/src/frostfs_testlib/utils/wallet_utils.py index d2b4229..0c5ab1a 100644 --- a/src/frostfs_testlib/utils/wallet_utils.py +++ b/src/frostfs_testlib/utils/wallet_utils.py @@ -9,16 +9,6 @@ from neo3.wallet import wallet as neo3_wallet logger = logging.getLogger("frostfs.testlib.utils") -def __fix_wallet_schema(wallet: dict) -> None: - # Temporary function to fix wallets that do not conform to the schema - # TODO: get rid of it once issue is solved - if "name" not in wallet: - wallet["name"] = None - for account in wallet["accounts"]: - if "extra" not in account: - account["extra"] = None - - def init_wallet(wallet_path: str, wallet_password: str): """ Create new wallet and new account. @@ -43,15 +33,29 @@ def get_last_address_from_wallet(wallet_path: str, wallet_password: str): Returns: The address for the wallet. """ - wallet = load_wallet(wallet_path, wallet_password) + with open(wallet_path) as wallet_file: + wallet = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wallet_password) address = wallet.accounts[-1].address logger.info(f"got address: {address}") return address def get_wallet_public_key(wallet_path: str, wallet_password: str, format: str = "hex") -> str: - wallet = load_wallet(wallet_path, wallet_password) - public_key_hex = str(wallet.accounts[0].public_key) + def __fix_wallet_schema(wallet: dict) -> None: + # Temporary function to fix wallets that do not conform to the schema + # TODO: get rid of it once issue is solved + if "name" not in wallet: + wallet["name"] = None + for account in wallet["accounts"]: + if "extra" not in account: + account["extra"] = None + + # Get public key from wallet file + with open(wallet_path, "r") as file: + wallet_content = json.load(file) + __fix_wallet_schema(wallet_content) + wallet_from_json = neo3_wallet.Wallet.from_json(wallet_content, password=wallet_password) + public_key_hex = str(wallet_from_json.accounts[0].public_key) # Convert public key to specified format if format == "hex": @@ -65,9 +69,7 @@ def get_wallet_public_key(wallet_path: str, wallet_password: str, format: str = raise ValueError(f"Invalid public key format: {format}") -def load_wallet(wallet_path: str, wallet_password: str) -> neo3_wallet.Wallet: - with open(wallet_path) as wallet_file: - wallet_content = json.load(wallet_file) - - __fix_wallet_schema(wallet_content) - return neo3_wallet.Wallet.from_json(wallet_content, password=wallet_password) +def load_wallet(path: str, passwd: str = "") -> neo3_wallet.Wallet: + with open(path, "r") as wallet_file: + wlt_data = wallet_file.read() + return neo3_wallet.Wallet.from_json(json.loads(wlt_data), password=passwd) diff --git a/tests/conftest.py b/tests/conftest.py deleted file mode 100644 index ea6d681..0000000 --- a/tests/conftest.py +++ /dev/null @@ -1,5 +0,0 @@ -import os -import sys - -app_dir = os.path.join(os.getcwd(), "src") -sys.path.insert(0, app_dir) diff --git a/tests/helpers.py b/tests/helpers.py index b7776fd..8391002 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -14,7 +14,11 @@ def format_error_details(error: Exception) -> str: Returns: String containing exception details. 
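Example (illustrative sketch):

        try:
            raise RuntimeError("something went wrong")
        except RuntimeError as error:
            details = format_error_details(error)
        # `details` now contains the full traceback text, including the message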
""" - detail_lines = traceback.format_exception(error) + detail_lines = traceback.format_exception( + etype=type(error), + value=error, + tb=error.__traceback__, + ) return "".join(detail_lines) diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py deleted file mode 100644 index 677aed4..0000000 --- a/tests/test_dataclasses.py +++ /dev/null @@ -1,33 +0,0 @@ -from typing import Any - -import pytest - -from frostfs_testlib.clients import AwsCliClient, Boto3ClientWrapper -from frostfs_testlib.storage.dataclasses.acl import EACLRole -from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode -from frostfs_testlib.storage.dataclasses.object_size import ObjectSize - - -class TestDataclassesStr: - """Here we are testing important classes string representation.""" - - @pytest.mark.parametrize( - "obj, expected", - [ - (Boto3ClientWrapper, "Boto3 client"), - (AwsCliClient, "AWS CLI"), - (ObjectSize("simple", 1), "simple"), - (ObjectSize("simple", 10), "simple"), - (ObjectSize("complex", 5000), "complex"), - (ObjectSize("complex", 5555), "complex"), - (StorageNode, "StorageNode"), - (MorphChain, "MorphChain"), - (S3Gate, "S3Gate"), - (HTTPGate, "HTTPGate"), - (InnerRing, "InnerRing"), - (EACLRole.OTHERS, "OTHERS"), - ], - ) - def test_classes_string_representation(self, obj: Any, expected: str): - assert f"{obj}" == expected - assert repr(obj) == expected diff --git a/tests/test_hosting.py b/tests/test_hosting.py index 39580cb..14be8c5 100644 --- a/tests/test_hosting.py +++ b/tests/test_hosting.py @@ -15,7 +15,6 @@ class TestHosting(TestCase): HOST1 = { "address": HOST1_ADDRESS, "plugin_name": HOST1_PLUGIN, - "healthcheck_plugin_name": "basic", "attributes": HOST1_ATTRIBUTES, "clis": HOST1_CLIS, "services": HOST1_SERVICES, @@ -33,7 +32,6 @@ class TestHosting(TestCase): HOST2 = { "address": HOST2_ADDRESS, "plugin_name": HOST2_PLUGIN, - "healthcheck_plugin_name": "basic", "attributes": HOST2_ATTRIBUTES, "clis": HOST2_CLIS, "services": HOST2_SERVICES, @@ -54,14 +52,18 @@ class TestHosting(TestCase): self.assertEqual(host1.config.plugin_name, self.HOST1_PLUGIN) self.assertDictEqual(host1.config.attributes, self.HOST1_ATTRIBUTES) self.assertListEqual(host1.config.clis, [CLIConfig(**cli) for cli in self.HOST1_CLIS]) - self.assertListEqual(host1.config.services, [ServiceConfig(**service) for service in self.HOST1_SERVICES]) + self.assertListEqual( + host1.config.services, [ServiceConfig(**service) for service in self.HOST1_SERVICES] + ) host2 = hosting.get_host_by_address(self.HOST2_ADDRESS) self.assertEqual(host2.config.address, self.HOST2_ADDRESS) self.assertEqual(host2.config.plugin_name, self.HOST2_PLUGIN) self.assertDictEqual(host2.config.attributes, self.HOST2_ATTRIBUTES) self.assertListEqual(host2.config.clis, [CLIConfig(**cli) for cli in self.HOST2_CLIS]) - self.assertListEqual(host2.config.services, [ServiceConfig(**service) for service in self.HOST2_SERVICES]) + self.assertListEqual( + host2.config.services, [ServiceConfig(**service) for service in self.HOST2_SERVICES] + ) def test_get_host_by_service(self): hosting = Hosting() @@ -102,7 +104,9 @@ class TestHosting(TestCase): services = hosting.find_service_configs(rf"^{self.SERVICE_NAME_PREFIX}") self.assertEqual(len(services), 2) for service in services: - self.assertEqual(service.name[: len(self.SERVICE_NAME_PREFIX)], self.SERVICE_NAME_PREFIX) + self.assertEqual( + service.name[: len(self.SERVICE_NAME_PREFIX)], self.SERVICE_NAME_PREFIX + ) service1 = 
hosting.find_service_configs(self.SERVICE1["name"]) self.assertEqual(len(service1), 1) diff --git a/tests/test_load_config.py b/tests/test_load_config.py deleted file mode 100644 index fbeb587..0000000 --- a/tests/test_load_config.py +++ /dev/null @@ -1,798 +0,0 @@ -from dataclasses import Field, dataclass, fields, is_dataclass -from typing import Any, get_args - -import pytest - -from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType, Preset, ReadFrom -from frostfs_testlib.load.runners import DefaultRunner -from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME -from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController - - -@dataclass -class MetaTestField: - field: Field - field_type: type - instance: Any - - -class TestLoadConfig: - @pytest.fixture - def set_empty(self, request: pytest.FixtureRequest): - # Workaround for verify - if "param" in request.__dict__ and request.param: - return request.param - - return False - - @pytest.fixture - def load_type(self, request: pytest.FixtureRequest): - # Workaround for verify - if "param" in request.__dict__ and request.param: - return request.param - - return None - - @pytest.fixture - def load_params(self, load_type: LoadType, set_empty: bool, request: pytest.FixtureRequest): - load_scenario = request.param - return self._get_filled_load_params(load_type, load_scenario, set_empty) - - def test_load_params_only_load_type_required(self): - load_params = LoadParams(load_type=LoadType.S3) - expected = "s3" - assert repr(load_params) == expected - assert f"{load_params}" == expected - - def test_load_params_init_time(self): - load_params = LoadParams(load_type=LoadType.S3) - vus = 100 - - load_params.vu_init_time = BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME - # Used in time calculations - load_params.readers = vus - load_params.writers = vus - load_params.preallocated_readers = vus - load_params.preallocated_writers = vus - - # Not used in time calculations - load_params.deleters = vus - load_params.preallocated_deleters = vus - - expected = vus * 4 * BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME - actual = load_params.get_init_time() - assert actual == expected, "Incorrect time for get_init_time()" - - def test_load_params_initially_have_all_values_none(self): - load_params = LoadParams(load_type=LoadType.S3) - self._check_all_values_none(load_params, ["load_type", "scenario"]) - - def test_preset_initially_have_all_values_none(self): - preset = Preset() - self._check_all_values_none(preset) - - @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) - def test_string_representation_s3_car(self, load_params: LoadParams): - load_params.object_size = 524288 - expected = "s3_car 512 MiB, write_rate=10, read_rate=9, delete_rate=11, preallocated_writers=20, preallocated_readers=20, preallocated_deleters=21" - assert f"{load_params}" == expected - assert repr(load_params) == expected - - @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True) - def test_string_representation_grpc(self, load_params: LoadParams): - load_params.object_size = 512 - expected = "grpc 512 KiB, writers=7, readers=7, deleters=8" - assert f"{load_params}" == expected - assert repr(load_params) == expected - - @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True) - def test_load_controller_string_representation(self, load_params: LoadParams): - load_params.endpoint_selection_strategy = 
EndpointSelectionStrategy.ALL - load_params.object_size = 512 - background_load_controller = BackgroundLoadController("tmp", load_params, None, None, DefaultRunner(None)) - expected = "grpc 512 KiB, writers=7, readers=7, deleters=8" - assert f"{background_load_controller}" == expected - assert repr(background_load_controller) == expected - - def test_load_set_id_changes_fields(self): - load_params = LoadParams(load_type=LoadType.S3) - load_params.preset = Preset() - load_params.read_from = ReadFrom["REGISTRY"] - load_params.working_dir = "/tmp" - load_params.set_id("test_id") - - assert load_params.registry_file == "/tmp/test_id_registry.bolt" - assert load_params.preset.pregen_json == "/tmp/test_id_prepare.json" - assert load_params.load_id == "test_id" - - # No other values should be changed - self._check_all_values_none( - load_params, - [ - "load_type", - "working_dir", - "load_id", - "registry_file", - "preset", - "scenario", - "read_from", - ], - ) - self._check_all_values_none(load_params.preset, ["pregen_json", "scenario"]) - - @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True) - def test_argument_parsing_for_grpc_scenario(self, load_params: LoadParams): - expected_preset_args = [ - "--size '11'", - "--acl 'acl'", - "--preload_obj '13'", - "--retry '24'", - "--rule 'rule' --rule 'rule_2'", - "--out 'pregen_json'", - "--workers '7'", - "--containers '16'", - "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", - "--ignore-errors", - "--sleep '19'", - "--local", - ] - expected_env_vars = { - "DURATION": 9, - "WRITE_OBJ_SIZE": 11, - "REGISTRY_FILE": "registry_file", - "K6_MIN_ITERATION_DURATION": "min_iteration_duration", - "K6_SETUP_TIMEOUT": "setup_timeout", - "WRITERS": 7, - "READERS": 7, - "DELETERS": 8, - "READ_AGE": 8, - "STREAMING": 9, - "K6_OUT": "output", - "PREGEN_JSON": "pregen_json", - "PREPARE_LOCALLY": True, - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params", [LoadScenario.gRPC_CAR], indirect=True) - def test_argument_parsing_for_grpc_car_scenario(self, load_params: LoadParams): - expected_preset_args = [ - "--size '11'", - "--preload_obj '13'", - "--retry '24'", - "--rule 'rule' --rule 'rule_2'", - "--out 'pregen_json'", - "--workers '7'", - "--containers '16'", - "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", - "--ignore-errors", - "--sleep '19'", - "--local", - "--acl 'acl'", - ] - expected_env_vars = { - "DURATION": 9, - "WRITE_OBJ_SIZE": 11, - "K6_OUT": "output", - "REGISTRY_FILE": "registry_file", - "K6_MIN_ITERATION_DURATION": "min_iteration_duration", - "K6_SETUP_TIMEOUT": "setup_timeout", - "MAX_WRITERS": 11, - "MAX_READERS": 11, - "MAX_DELETERS": 12, - "PRE_ALLOC_DELETERS": 21, - "PRE_ALLOC_READERS": 20, - "PRE_ALLOC_WRITERS": 20, - "PREGEN_JSON": "pregen_json", - "TIME_UNIT": "time_unit", - "WRITE_RATE": 10, - "READ_RATE": 9, - "READ_AGE": 8, - "DELETE_RATE": 11, - "STREAMING": 9, - "PREPARE_LOCALLY": True, - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params", [LoadScenario.S3], indirect=True) - def test_argument_parsing_for_s3_scenario(self, load_params: LoadParams): - expected_preset_args = [ - "--size '11'", - "--preload_obj '13'", - "--no-verify-ssl", - "--out 'pregen_json'", - "--workers '7'", - "--buckets '13'", - "--location 's3_location' 
--location 's3_location_2'", - "--ignore-errors", - "--sleep '19'", - "--acl 'acl'", - ] - expected_env_vars = { - "DURATION": 9, - "WRITE_OBJ_SIZE": 11, - "REGISTRY_FILE": "registry_file", - "K6_OUT": "output", - "K6_MIN_ITERATION_DURATION": "min_iteration_duration", - "K6_SETUP_TIMEOUT": "setup_timeout", - "WRITERS": 7, - "READERS": 7, - "DELETERS": 8, - "READ_AGE": 8, - "STREAMING": 9, - "NO_VERIFY_SSL": True, - "PREGEN_JSON": "pregen_json", - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) - def test_argument_parsing_for_s3_car_scenario_with_stringed_time(self, load_params: LoadParams): - load_params.load_time = "2d3h5min" - expected_preset_args = [ - "--size '11'", - "--preload_obj '13'", - "--no-verify-ssl", - "--out 'pregen_json'", - "--workers '7'", - "--buckets '13'", - "--location 's3_location' --location 's3_location_2'", - "--ignore-errors", - "--sleep '19'", - "--acl 'acl'", - ] - expected_env_vars = { - "DURATION": 183900, - "WRITE_OBJ_SIZE": 11, - "REGISTRY_FILE": "registry_file", - "K6_OUT": "output", - "K6_MIN_ITERATION_DURATION": "min_iteration_duration", - "K6_SETUP_TIMEOUT": "setup_timeout", - "NO_VERIFY_SSL": True, - "MAX_WRITERS": 11, - "MAX_READERS": 11, - "MAX_DELETERS": 12, - "PRE_ALLOC_DELETERS": 21, - "PRE_ALLOC_READERS": 20, - "PRE_ALLOC_WRITERS": 20, - "PREGEN_JSON": "pregen_json", - "TIME_UNIT": "time_unit", - "WRITE_RATE": 10, - "READ_RATE": 9, - "READ_AGE": 8, - "STREAMING": 9, - "DELETE_RATE": 11, - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) - def test_argument_parsing_for_s3_car_scenario(self, load_params: LoadParams): - expected_preset_args = [ - "--size '11'", - "--preload_obj '13'", - "--no-verify-ssl", - "--out 'pregen_json'", - "--workers '7'", - "--buckets '13'", - "--location 's3_location' --location 's3_location_2'", - "--ignore-errors", - "--sleep '19'", - "--acl 'acl'", - ] - expected_env_vars = { - "DURATION": 9, - "WRITE_OBJ_SIZE": 11, - "REGISTRY_FILE": "registry_file", - "K6_OUT": "output", - "K6_MIN_ITERATION_DURATION": "min_iteration_duration", - "K6_SETUP_TIMEOUT": "setup_timeout", - "NO_VERIFY_SSL": True, - "MAX_WRITERS": 11, - "MAX_READERS": 11, - "MAX_DELETERS": 12, - "PRE_ALLOC_DELETERS": 21, - "PRE_ALLOC_READERS": 20, - "PRE_ALLOC_WRITERS": 20, - "PREGEN_JSON": "pregen_json", - "TIME_UNIT": "time_unit", - "WRITE_RATE": 10, - "READ_RATE": 9, - "READ_AGE": 8, - "STREAMING": 9, - "DELETE_RATE": 11, - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params", [LoadScenario.HTTP], indirect=True) - def test_argument_parsing_for_http_scenario(self, load_params: LoadParams): - load_params.preset.local = False - expected_preset_args = [ - "--no-verify-ssl", - "--size '11'", - "--preload_obj '13'", - "--retry '24'", - "--rule 'rule' --rule 'rule_2'", - "--out 'pregen_json'", - "--workers '7'", - "--containers '16'", - "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", - "--ignore-errors", - "--sleep '19'", - "--acl 'acl'", - ] - expected_env_vars = { - "DURATION": 9, - "WRITE_OBJ_SIZE": 11, - "K6_OUT": "output", - "NO_VERIFY_SSL": True, - "REGISTRY_FILE": "registry_file", - 
"K6_MIN_ITERATION_DURATION": "min_iteration_duration", - "K6_SETUP_TIMEOUT": "setup_timeout", - "WRITERS": 7, - "READERS": 7, - "DELETERS": 8, - "READ_AGE": 8, - "STREAMING": 9, - "PREGEN_JSON": "pregen_json", - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params", [LoadScenario.LOCAL], indirect=True) - def test_argument_parsing_for_local_scenario(self, load_params: LoadParams): - load_params.preset.local = False - expected_preset_args = [ - "--size '11'", - "--preload_obj '13'", - "--retry '24'", - "--rule 'rule' --rule 'rule_2'", - "--out 'pregen_json'", - "--workers '7'", - "--containers '16'", - "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", - "--ignore-errors", - "--sleep '19'", - "--acl 'acl'", - ] - expected_env_vars = { - "CONFIG_DIR": "config_dir", - "CONFIG_FILE": "config_file", - "DURATION": 9, - "WRITE_OBJ_SIZE": 11, - "K6_OUT": "output", - "REGISTRY_FILE": "registry_file", - "K6_MIN_ITERATION_DURATION": "min_iteration_duration", - "K6_SETUP_TIMEOUT": "setup_timeout", - "WRITERS": 7, - "READERS": 7, - "DELETERS": 8, - "READ_AGE": 8, - "STREAMING": 9, - "MAX_TOTAL_SIZE_GB": 17, - "PREGEN_JSON": "pregen_json", - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize( - "input, value, params", - [ - (["A C ", " B"], ["A C", "B"], [f"--policy 'A C' --policy 'B'"]), - (" A ", ["A"], ["--policy 'A'"]), - (" A , B ", ["A , B"], ["--policy 'A , B'"]), - ([" A", "B "], ["A", "B"], ["--policy 'A' --policy 'B'"]), - (None, None, []), - ], - ) - def test_grpc_list_parsing_formatter(self, input, value, params): - load_params = LoadParams(LoadType.gRPC) - load_params.preset = Preset() - load_params.preset.container_placement_policy = input - assert load_params.preset.container_placement_policy == value - - self._check_preset_params(load_params, params) - - @pytest.mark.parametrize( - "input, value, params", - [ - (["A C ", " B"], ["A C", "B"], [f"--location 'A C' --location 'B'"]), - (" A ", ["A"], ["--location 'A'"]), - (" A , B ", ["A , B"], ["--location 'A , B'"]), - ([" A", "B "], ["A", "B"], ["--location 'A' --location 'B'"]), - (None, None, []), - ], - ) - def test_s3_list_parsing_formatter(self, input, value, params): - load_params = LoadParams(LoadType.S3) - load_params.preset = Preset() - load_params.preset.s3_location = input - assert load_params.preset.s3_location == value - - self._check_preset_params(load_params, params) - - @pytest.mark.parametrize( - "load_type, input, value, params", - [ - (LoadType.gRPC, ["A C ", " B"], ["A C", "B"], [f"--rule 'A C' --rule 'B'"]), - (LoadType.gRPC, " A ", ["A"], ["--rule 'A'"]), - (LoadType.gRPC, " A , B ", ["A , B"], ["--rule 'A , B'"]), - (LoadType.gRPC, [" A", "B "], ["A", "B"], ["--rule 'A' --rule 'B'"]), - (LoadType.gRPC, None, None, []), - (LoadType.S3, ["A C ", " B"], ["A C", "B"], []), - (LoadType.S3, None, None, []), - ], - ) - def test_ape_list_parsing_formatter(self, load_type, input, value, params): - load_params = LoadParams(load_type) - load_params.preset = Preset() - load_params.preset.rule = input - assert load_params.preset.rule == value - - self._check_preset_params(load_params, params) - - @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True) - def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): - 
expected_env_vars = { - "CLIENTS": 14, - "REGISTRY_FILE": "registry_file", - "K6_SETUP_TIMEOUT": "setup_timeout", - "NO_VERIFY_SSL": True, - "TIME_LIMIT": 11, - } - - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.gRPC)], indirect=True) - def test_argument_parsing_for_grpc_verify_scenario(self, load_params: LoadParams): - expected_env_vars = { - "CLIENTS": 14, - "REGISTRY_FILE": "registry_file", - "K6_SETUP_TIMEOUT": "setup_timeout", - "NO_VERIFY_SSL": True, - "TIME_LIMIT": 11, - } - - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.gRPC, True)], indirect=True) - def test_empty_argument_parsing_for_grpc_scenario(self, load_params: LoadParams): - expected_preset_args = [ - "--size '0'", - "--preload_obj '0'", - "--retry '0'", - "--rule ''", - "--out ''", - "--workers '0'", - "--containers '0'", - "--policy ''", - "--sleep '0'", - "--acl ''", - ] - expected_env_vars = { - "DURATION": 0, - "WRITE_OBJ_SIZE": 0, - "REGISTRY_FILE": "", - "K6_OUT": "", - "K6_MIN_ITERATION_DURATION": "", - "K6_SETUP_TIMEOUT": "", - "WRITERS": 0, - "READERS": 0, - "DELETERS": 0, - "READ_AGE": 0, - "STREAMING": 0, - "PREGEN_JSON": "", - "PREPARE_LOCALLY": False, - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.gRPC_CAR, True)], indirect=True) - def test_empty_argument_parsing_for_grpc_car_scenario(self, load_params: LoadParams): - expected_preset_args = [ - "--size '0'", - "--preload_obj '0'", - "--retry '0'", - "--rule ''", - "--out ''", - "--workers '0'", - "--containers '0'", - "--policy ''", - "--sleep '0'", - "--acl ''", - ] - expected_env_vars = { - "DURATION": 0, - "WRITE_OBJ_SIZE": 0, - "REGISTRY_FILE": "", - "K6_OUT": "", - "K6_MIN_ITERATION_DURATION": "", - "K6_SETUP_TIMEOUT": "", - "MAX_WRITERS": 0, - "MAX_READERS": 0, - "MAX_DELETERS": 0, - "PRE_ALLOC_DELETERS": 0, - "PRE_ALLOC_READERS": 0, - "PRE_ALLOC_WRITERS": 0, - "PREGEN_JSON": "", - "TIME_UNIT": "", - "WRITE_RATE": 0, - "READ_RATE": 0, - "DELETE_RATE": 0, - "READ_AGE": 0, - "STREAMING": 0, - "PREPARE_LOCALLY": False, - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.S3, True)], indirect=True) - def test_empty_argument_parsing_for_s3_scenario(self, load_params: LoadParams): - expected_preset_args = [ - "--size '0'", - "--preload_obj '0'", - "--out ''", - "--workers '0'", - "--buckets '0'", - "--location ''", - "--sleep '0'", - "--acl ''", - ] - expected_env_vars = { - "DURATION": 0, - "WRITE_OBJ_SIZE": 0, - "REGISTRY_FILE": "", - "K6_OUT": "", - "K6_MIN_ITERATION_DURATION": "", - "K6_SETUP_TIMEOUT": "", - "WRITERS": 0, - "READERS": 0, - "DELETERS": 0, - "READ_AGE": 0, - "STREAMING": 0, - "NO_VERIFY_SSL": False, - "PREGEN_JSON": "", - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.S3_CAR, True)], indirect=True) - def test_empty_argument_parsing_for_s3_car_scenario(self, load_params: LoadParams): - expected_preset_args = [ - "--size '0'", - "--preload_obj '0'", - "--out ''", - "--workers '0'", - "--buckets '0'", - "--location ''", - "--sleep '0'", - "--acl ''", 
- ] - expected_env_vars = { - "DURATION": 0, - "WRITE_OBJ_SIZE": 0, - "REGISTRY_FILE": "", - "K6_OUT": "", - "K6_MIN_ITERATION_DURATION": "", - "K6_SETUP_TIMEOUT": "", - "NO_VERIFY_SSL": False, - "MAX_WRITERS": 0, - "MAX_READERS": 0, - "MAX_DELETERS": 0, - "PRE_ALLOC_DELETERS": 0, - "PRE_ALLOC_READERS": 0, - "PRE_ALLOC_WRITERS": 0, - "PREGEN_JSON": "", - "TIME_UNIT": "", - "WRITE_RATE": 0, - "READ_RATE": 0, - "DELETE_RATE": 0, - "READ_AGE": 0, - "STREAMING": 0, - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.HTTP, True)], indirect=True) - def test_empty_argument_parsing_for_http_scenario(self, load_params: LoadParams): - expected_preset_args = [ - "--size '0'", - "--preload_obj '0'", - "--retry '0'", - "--rule ''", - "--out ''", - "--workers '0'", - "--containers '0'", - "--policy ''", - "--sleep '0'", - "--acl ''", - ] - expected_env_vars = { - "DURATION": 0, - "WRITE_OBJ_SIZE": 0, - "NO_VERIFY_SSL": False, - "REGISTRY_FILE": "", - "K6_OUT": "", - "K6_MIN_ITERATION_DURATION": "", - "K6_SETUP_TIMEOUT": "", - "WRITERS": 0, - "READERS": 0, - "DELETERS": 0, - "READ_AGE": 0, - "STREAMING": 0, - "PREGEN_JSON": "", - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.LOCAL, True)], indirect=True) - def test_empty_argument_parsing_for_local_scenario(self, load_params: LoadParams): - expected_preset_args = [ - "--size '0'", - "--preload_obj '0'", - "--retry '0'", - "--rule ''", - "--out ''", - "--workers '0'", - "--containers '0'", - "--policy ''", - "--sleep '0'", - "--acl ''", - ] - expected_env_vars = { - "CONFIG_DIR": "", - "CONFIG_FILE": "", - "DURATION": 0, - "WRITE_OBJ_SIZE": 0, - "REGISTRY_FILE": "", - "K6_OUT": "", - "K6_MIN_ITERATION_DURATION": "", - "K6_SETUP_TIMEOUT": "", - "MAX_TOTAL_SIZE_GB": 0, - "WRITERS": 0, - "READERS": 0, - "DELETERS": 0, - "READ_AGE": 0, - "STREAMING": 0, - "PREGEN_JSON": "", - } - - self._check_preset_params(load_params, expected_preset_args) - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize( - "load_params, load_type, set_empty", - [(LoadScenario.VERIFY, LoadType.S3, True)], - indirect=True, - ) - def test_empty_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): - expected_env_vars = { - "CLIENTS": 0, - "REGISTRY_FILE": "", - "K6_SETUP_TIMEOUT": "", - "NO_VERIFY_SSL": False, - "TIME_LIMIT": 0, - } - - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize( - "load_params, load_type, set_empty", - [(LoadScenario.VERIFY, LoadType.gRPC, True)], - indirect=True, - ) - def test_argument_parsing_for_grpc_verify_scenario(self, load_params: LoadParams): - expected_env_vars = { - "CLIENTS": 0, - "REGISTRY_FILE": "", - "K6_SETUP_TIMEOUT": "", - "NO_VERIFY_SSL": False, - "TIME_LIMIT": 0, - } - - self._check_env_vars(load_params, expected_env_vars) - - @pytest.mark.parametrize( - "load_params, load_type", - [(LoadScenario.gRPC, LoadType.gRPC)], - indirect=True, - ) - @pytest.mark.parametrize( - "load_time, expected_seconds", - [ - (300, 300), - ("2d3h45min", 186300), - ("1d6h", 108000), - ("1d", 86400), - ("1d1min", 86460), - ("2h", 7200), - ("2h2min", 7320), - ], - ) - def test_convert_time_to_seconds(self, load_params: LoadParams, load_time: str | int, expected_seconds: int): - load_params.load_time = 
load_time - assert load_params.load_time == expected_seconds - - def _check_preset_params(self, load_params: LoadParams, expected_preset_args: list[str]): - preset_parameters = load_params.get_preset_arguments() - assert sorted(preset_parameters) == sorted(expected_preset_args) - - def _check_env_vars(self, load_params: LoadParams, expected_env_vars: dict[str, str]): - env_vars = load_params.get_k6_vars() - assert env_vars == expected_env_vars - - def _check_all_values_none(self, dataclass, skip_fields=None): - if skip_fields is None: - skip_fields = [] - - dataclass_fields = [field for field in fields(dataclass) if field.name not in skip_fields] - for field in dataclass_fields: - value = getattr(dataclass, field.name) - assert value is None, f"{field.name} is not None" - - def _check_all_values_not_none(self, dataclass, skip_fields=None): - if skip_fields is None: - skip_fields = [] - - dataclass_fields = [field for field in fields(dataclass) if field.name not in skip_fields] - for field in dataclass_fields: - value = getattr(dataclass, field.name) - assert value is not None, f"{field.name} is not None" - - def _get_filled_load_params(self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False) -> LoadParams: - load_type_map = { - LoadScenario.S3: LoadType.S3, - LoadScenario.S3_CAR: LoadType.S3, - LoadScenario.gRPC: LoadType.gRPC, - LoadScenario.gRPC_CAR: LoadType.gRPC, - LoadScenario.LOCAL: LoadType.gRPC, - LoadScenario.HTTP: LoadType.HTTP, - } - load_type = load_type_map[load_scenario] if not load_type else load_type - - load_params = LoadParams(load_type) - load_params.scenario = load_scenario - load_params.preset = Preset() - - meta_fields = self._get_meta_fields(load_params) - for field in meta_fields: - if getattr(field.instance, field.field.name) is None and load_params.scenario in field.field.metadata["applicable_scenarios"]: - value_to_set_map = { - int: 0 if set_emtpy else len(field.field.name), - float: 0 if set_emtpy else len(field.field.name), - str: "" if set_emtpy else field.field.name, - list[str]: "" if set_emtpy else [field.field.name, f"{field.field.name}_2"], - bool: False if set_emtpy else True, - } - value_to_set = value_to_set_map[field.field_type] - setattr(field.instance, field.field.name, value_to_set) - - return load_params - - def _get_actual_field_type(self, field: Field) -> type: - return get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type) - - def _get_meta_fields(self, instance): - data_fields = fields(instance) - fields_with_data = [MetaTestField(field, self._get_actual_field_type(field), instance) for field in data_fields if field.metadata] - - for field in data_fields: - actual_field_type = self._get_actual_field_type(field) - if is_dataclass(actual_field_type) and getattr(instance, field.name): - fields_with_data += self._get_meta_fields(getattr(instance, field.name)) - - return fields_with_data or [] diff --git a/tests/test_ssh_shell.py b/tests/test_ssh_shell.py index ecd8c3c..4d1c0fd 100644 --- a/tests/test_ssh_shell.py +++ b/tests/test_ssh_shell.py @@ -1,68 +1,50 @@ import os - -import pytest +from unittest import SkipTest, TestCase from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput -from frostfs_testlib.shell.ssh_shell import SshConnectionProvider, SSHShell +from frostfs_testlib.shell.ssh_shell import SSHShell from helpers import format_error_details, get_output_lines -def get_shell(host: str): +def init_shell() -> SSHShell: + host = os.getenv("SSH_SHELL_HOST") port = 
os.getenv("SSH_SHELL_PORT", "22") login = os.getenv("SSH_SHELL_LOGIN") - - password = os.getenv("SSH_SHELL_PASSWORD", "") - private_key_path = os.getenv("SSH_SHELL_PRIVATE_KEY_PATH", "") - private_key_passphrase = os.getenv("SSH_SHELL_PRIVATE_KEY_PASSPHRASE", "") + private_key_path = os.getenv("SSH_SHELL_PRIVATE_KEY_PATH") + private_key_passphrase = os.getenv("SSH_SHELL_PRIVATE_KEY_PASSPHRASE") if not all([host, login, private_key_path, private_key_passphrase]): # TODO: in the future we might use https://pypi.org/project/mock-ssh-server, # at the moment it is not suitable for us because of its issues with stdin - pytest.skip("SSH connection is not configured") + raise SkipTest("SSH connection is not configured") return SSHShell( host=host, port=port, login=login, - password=password, private_key_path=private_key_path, private_key_passphrase=private_key_passphrase, ) -@pytest.fixture(scope="module") -def shell() -> SSHShell: - return get_shell(host=os.getenv("SSH_SHELL_HOST")) +class TestSSHShellInteractive(TestCase): + @classmethod + def setUpClass(cls): + cls.shell = init_shell() - -@pytest.fixture(scope="module") -def shell_same_host() -> SSHShell: - return get_shell(host=os.getenv("SSH_SHELL_HOST")) - - -@pytest.fixture(scope="module") -def shell_another_host() -> SSHShell: - return get_shell(host=os.getenv("SSH_SHELL_HOST_2")) - - -@pytest.fixture(scope="function", autouse=True) -def reset_connection(): - provider = SshConnectionProvider() - provider.drop_all() - - -class TestSSHShellInteractive: - def test_command_with_one_prompt(self, shell: SSHShell): + def test_command_with_one_prompt(self): script = "password = input('Password: '); print('\\n' + password)" inputs = [InteractiveInput(prompt_pattern="Password", input="test")] - result = shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)) + result = self.shell.exec( + f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs) + ) - assert result.return_code == 0 - assert ["Password: test", "test"] == get_output_lines(result) - assert not result.stderr + self.assertEqual(0, result.return_code) + self.assertEqual(["Password: test", "test"], get_output_lines(result)) + self.assertEqual("", result.stderr) - def test_command_with_several_prompts(self, shell: SSHShell): + def test_command_with_several_prompts(self): script = ( "input1 = input('Input1: '); print('\\n' + input1); " "input2 = input('Input2: '); print('\\n' + input2)" @@ -72,132 +54,86 @@ class TestSSHShellInteractive: InteractiveInput(prompt_pattern="Input2", input="test2"), ] - result = shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)) + result = self.shell.exec( + f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs) + ) - assert result.return_code == 0 - assert ["Input1: test1", "test1", "Input2: test2", "test2"] == get_output_lines(result) - assert not result.stderr + self.assertEqual(0, result.return_code) + self.assertEqual( + ["Input1: test1", "test1", "Input2: test2", "test2"], get_output_lines(result) + ) + self.assertEqual("", result.stderr) - def test_invalid_command_with_check(self, shell: SSHShell): + def test_invalid_command_with_check(self): script = "invalid script" inputs = [InteractiveInput(prompt_pattern=".*", input="test")] - with pytest.raises(RuntimeError) as raised: - shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)) + with self.assertRaises(RuntimeError) as raised: + self.shell.exec(f'python3 -c "{script}"', 
CommandOptions(interactive_inputs=inputs)) - error = format_error_details(raised.value) - assert "SyntaxError" in error - assert "return code: 1" in error + error = format_error_details(raised.exception) + self.assertIn("SyntaxError", error) + self.assertIn("return code: 1", error) - def test_invalid_command_without_check(self, shell: SSHShell): + def test_invalid_command_without_check(self): script = "invalid script" inputs = [InteractiveInput(prompt_pattern=".*", input="test")] - result = shell.exec( + result = self.shell.exec( f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs, check=False), ) - assert "SyntaxError" in result.stdout - assert result.return_code == 1 + self.assertIn("SyntaxError", result.stdout) + self.assertEqual(1, result.return_code) - def test_non_existing_binary(self, shell: SSHShell): + def test_non_existing_binary(self): inputs = [InteractiveInput(prompt_pattern=".*", input="test")] - with pytest.raises(RuntimeError) as raised: - shell.exec("not-a-command", CommandOptions(interactive_inputs=inputs)) + with self.assertRaises(RuntimeError) as raised: + self.shell.exec("not-a-command", CommandOptions(interactive_inputs=inputs)) - error = format_error_details(raised.value) - assert "return code: 127" in error + error = format_error_details(raised.exception) + self.assertIn("return code: 127", error) -class TestSSHShellNonInteractive: - def test_correct_command(self, shell: SSHShell): +class TestSSHShellNonInteractive(TestCase): + @classmethod + def setUpClass(cls): + cls.shell = init_shell() + + def test_correct_command(self): script = "print('test')" - result = shell.exec(f'python3 -c "{script}"') + result = self.shell.exec(f'python3 -c "{script}"') - assert result.return_code == 0 - assert result.stdout.strip() == "test" - assert not result.stderr + self.assertEqual(0, result.return_code) + self.assertEqual("test", result.stdout.strip()) + self.assertEqual("", result.stderr) - def test_invalid_command_with_check(self, shell: SSHShell): + def test_invalid_command_with_check(self): script = "invalid script" - with pytest.raises(RuntimeError) as raised: - shell.exec(f'python3 -c "{script}"') + with self.assertRaises(RuntimeError) as raised: + self.shell.exec(f'python3 -c "{script}"') - error = format_error_details(raised.value) - assert "Error" in error - assert "return code: 1" in error + error = format_error_details(raised.exception) + self.assertIn("Error", error) + self.assertIn("return code: 1", error) - def test_invalid_command_without_check(self, shell: SSHShell): + def test_invalid_command_without_check(self): script = "invalid script" - result = shell.exec(f'python3 -c "{script}"', CommandOptions(check=False)) + result = self.shell.exec(f'python3 -c "{script}"', CommandOptions(check=False)) - assert result.return_code == 1 + self.assertEqual(1, result.return_code) # TODO: we have inconsistency with local shell here, the local shell captures error info # in stdout while ssh shell captures it in stderr - assert "Error" in result.stderr + self.assertIn("Error", result.stderr) - def test_non_existing_binary(self, shell: SSHShell): - with pytest.raises(RuntimeError) as raised: - shell.exec("not-a-command") + def test_non_existing_binary(self): + with self.assertRaises(RuntimeError) as exc: + self.shell.exec("not-a-command") - error = format_error_details(raised.value) - assert "Error" in error - assert "return code: 127" in error - - -class TestSSHShellConnection: - def test_connection_provider_is_singleton(self): - provider = SshConnectionProvider() 
- provider2 = SshConnectionProvider() - assert id(provider) == id(provider2) - - def test_connection_provider_has_creds(self, shell: SSHShell): - provider = SshConnectionProvider() - assert len(provider.creds) == 1 - assert len(provider.connections) == 0 - - def test_connection_provider_has_only_one_connection(self, shell: SSHShell): - provider = SshConnectionProvider() - assert len(provider.connections) == 0 - shell.exec("echo 1") - assert len(provider.connections) == 1 - shell.exec("echo 2") - assert len(provider.connections) == 1 - shell.drop() - assert len(provider.connections) == 0 - - def test_connection_same_host(self, shell: SSHShell, shell_same_host: SSHShell): - provider = SshConnectionProvider() - assert len(provider.connections) == 0 - - shell.exec("echo 1") - assert len(provider.connections) == 1 - - shell_same_host.exec("echo 2") - assert len(provider.connections) == 1 - - shell.drop() - assert len(provider.connections) == 0 - - shell.exec("echo 3") - assert len(provider.connections) == 1 - - def test_connection_another_host(self, shell: SSHShell, shell_another_host: SSHShell): - provider = SshConnectionProvider() - assert len(provider.connections) == 0 - - shell.exec("echo 1") - assert len(provider.connections) == 1 - - shell_another_host.exec("echo 2") - assert len(provider.connections) == 2 - - shell.drop() - assert len(provider.connections) == 1 - - shell_another_host.drop() - assert len(provider.connections) == 0 + error = format_error_details(exc.exception) + self.assertIn("Error", error) + self.assertIn("return code: 127", error)
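Note (not part of the patch above): a minimal, hypothetical usage sketch of the SSHShell wrapper exercised by tests/test_ssh_shell.py, assuming the same SSH_SHELL_* environment variables that init_shell() reads are exported; host, login, key path and passphrase values are placeholders, not values from this change.

import os

from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput
from frostfs_testlib.shell.ssh_shell import SSHShell

# Connection parameters come from the environment, mirroring init_shell() in the tests.
shell = SSHShell(
    host=os.environ["SSH_SHELL_HOST"],            # placeholder target host
    port=os.getenv("SSH_SHELL_PORT", "22"),
    login=os.environ["SSH_SHELL_LOGIN"],          # placeholder login
    private_key_path=os.environ["SSH_SHELL_PRIVATE_KEY_PATH"],
    private_key_passphrase=os.environ["SSH_SHELL_PRIVATE_KEY_PASSPHRASE"],
)

# Non-interactive command: stdout carries the script output, return_code is 0 on success.
result = shell.exec('python3 -c "print(\'test\')"')
assert result.return_code == 0 and result.stdout.strip() == "test"

# Interactive command: each prompt is matched by its pattern and answered in order.
inputs = [InteractiveInput(prompt_pattern="Password", input="test")]
shell.exec('python3 -c "p = input(\'Password: \'); print(p)"', CommandOptions(interactive_inputs=inputs))

# A failing command raises RuntimeError unless check=False is passed.
shell.exec('python3 -c "invalid script"', CommandOptions(check=False))

As in the tests, a non-zero exit with check left at its default raises RuntimeError, while check=False returns the result so the caller can inspect return_code and stderr directly.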